diff --git a/assets/ambassador/ambassador-6.7.1100.tgz b/assets/ambassador/ambassador-6.7.1100.tgz new file mode 100644 index 000000000..4a1ec9bac Binary files /dev/null and b/assets/ambassador/ambassador-6.7.1100.tgz differ diff --git a/assets/artifactory-ha/artifactory-ha-3.0.1400.tgz b/assets/artifactory-ha/artifactory-ha-3.0.1400.tgz new file mode 100644 index 000000000..bedccb9f6 Binary files /dev/null and b/assets/artifactory-ha/artifactory-ha-3.0.1400.tgz differ diff --git a/assets/artifactory-ha/artifactory-ha-4.13.000.tgz b/assets/artifactory-ha/artifactory-ha-4.13.000.tgz new file mode 100644 index 000000000..79a2d99f9 Binary files /dev/null and b/assets/artifactory-ha/artifactory-ha-4.13.000.tgz differ diff --git a/assets/artifactory-ha/artifactory-ha-4.7.600.tgz b/assets/artifactory-ha/artifactory-ha-4.7.600.tgz new file mode 100644 index 000000000..0ea201c47 Binary files /dev/null and b/assets/artifactory-ha/artifactory-ha-4.7.600.tgz differ diff --git a/assets/artifactory-jcr/artifactory-jcr-2.5.100.tgz b/assets/artifactory-jcr/artifactory-jcr-2.5.100.tgz new file mode 100644 index 000000000..d38b263f0 Binary files /dev/null and b/assets/artifactory-jcr/artifactory-jcr-2.5.100.tgz differ diff --git a/assets/artifactory-jcr/artifactory-jcr-3.4.000.tgz b/assets/artifactory-jcr/artifactory-jcr-3.4.000.tgz new file mode 100644 index 000000000..7335cf942 Binary files /dev/null and b/assets/artifactory-jcr/artifactory-jcr-3.4.000.tgz differ diff --git a/assets/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway-1.2.100.tgz b/assets/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway-1.2.100.tgz new file mode 100644 index 000000000..24e6579ec Binary files /dev/null and b/assets/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway-1.2.100.tgz differ diff --git a/assets/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller-1.8.2800.tgz b/assets/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller-1.8.2800.tgz new file mode 100644 index 000000000..90f4def03 Binary files /dev/null and b/assets/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller-1.8.2800.tgz differ diff --git a/assets/cloudcasa/cloudcasa-0.1.000.tgz b/assets/cloudcasa/cloudcasa-0.1.000.tgz new file mode 100644 index 000000000..01cac8f20 Binary files /dev/null and b/assets/cloudcasa/cloudcasa-0.1.000.tgz differ diff --git a/assets/cloudcasa/cloudcasa-1.tgz b/assets/cloudcasa/cloudcasa-1.tgz new file mode 100644 index 000000000..dad3d56ef Binary files /dev/null and b/assets/cloudcasa/cloudcasa-1.tgz differ diff --git a/assets/cockroachdb/cockroachdb-4.1.200.tgz b/assets/cockroachdb/cockroachdb-4.1.200.tgz new file mode 100644 index 000000000..5ce844838 Binary files /dev/null and b/assets/cockroachdb/cockroachdb-4.1.200.tgz differ diff --git a/assets/csi-wekafs/csi-wekafsplugin-0.6.400.tgz b/assets/csi-wekafs/csi-wekafsplugin-0.6.400.tgz new file mode 100644 index 000000000..18ad93426 Binary files /dev/null and b/assets/csi-wekafs/csi-wekafsplugin-0.6.400.tgz differ diff --git a/assets/datadog/datadog-2.4.200.tgz b/assets/datadog/datadog-2.4.200.tgz new file mode 100644 index 000000000..7240ccb53 Binary files /dev/null and b/assets/datadog/datadog-2.4.200.tgz differ diff --git a/assets/dynatrace-oneagent-operator/dynatrace-oneagent-operator-0.8.000.tgz b/assets/dynatrace-oneagent-operator/dynatrace-oneagent-operator-0.8.000.tgz new file mode 100644 index 000000000..6911e8d55 Binary files /dev/null and 
b/assets/dynatrace-oneagent-operator/dynatrace-oneagent-operator-0.8.000.tgz differ diff --git a/assets/falcon-sensor/falcon-sensor-0.9.300.tgz b/assets/falcon-sensor/falcon-sensor-0.9.300.tgz new file mode 100644 index 000000000..6506a9f7d Binary files /dev/null and b/assets/falcon-sensor/falcon-sensor-0.9.300.tgz differ diff --git a/assets/federatorai/federatorai-4.5.100.tgz b/assets/federatorai/federatorai-4.5.100.tgz new file mode 100644 index 000000000..30652719d Binary files /dev/null and b/assets/federatorai/federatorai-4.5.100.tgz differ diff --git a/assets/haproxy/haproxy-1.12.100.tgz b/assets/haproxy/haproxy-1.12.100.tgz new file mode 100644 index 000000000..4eca84851 Binary files /dev/null and b/assets/haproxy/haproxy-1.12.100.tgz differ diff --git a/assets/haproxy/haproxy-1.12.500.tgz b/assets/haproxy/haproxy-1.12.500.tgz new file mode 100644 index 000000000..a4248f5cf Binary files /dev/null and b/assets/haproxy/haproxy-1.12.500.tgz differ diff --git a/assets/haproxy/haproxy-1.4.300.tgz b/assets/haproxy/haproxy-1.4.300.tgz new file mode 100644 index 000000000..9b78e1b47 Binary files /dev/null and b/assets/haproxy/haproxy-1.4.300.tgz differ diff --git a/assets/hpe-csi-driver/hpe-csi-driver-1.3.000.tgz b/assets/hpe-csi-driver/hpe-csi-driver-1.3.000.tgz new file mode 100644 index 000000000..68a3925de Binary files /dev/null and b/assets/hpe-csi-driver/hpe-csi-driver-1.3.000.tgz differ diff --git a/assets/hpe-csi-driver/hpe-csi-driver-1.4.200.tgz b/assets/hpe-csi-driver/hpe-csi-driver-1.4.200.tgz new file mode 100644 index 000000000..22acf87bb Binary files /dev/null and b/assets/hpe-csi-driver/hpe-csi-driver-1.4.200.tgz differ diff --git a/assets/hpe-flexvolume-driver/hpe-flexvolume-driver-3.1.000.tgz b/assets/hpe-flexvolume-driver/hpe-flexvolume-driver-3.1.000.tgz new file mode 100644 index 000000000..e8b9a12d7 Binary files /dev/null and b/assets/hpe-flexvolume-driver/hpe-flexvolume-driver-3.1.000.tgz differ diff --git a/assets/instana-agent/instana-agent-1.0.2900.tgz b/assets/instana-agent/instana-agent-1.0.2900.tgz new file mode 100644 index 000000000..2691693f5 Binary files /dev/null and b/assets/instana-agent/instana-agent-1.0.2900.tgz differ diff --git a/assets/k8s-triliovault-operator/k8s-triliovault-operator-2.0.500.tgz b/assets/k8s-triliovault-operator/k8s-triliovault-operator-2.0.500.tgz new file mode 100644 index 000000000..ac0b78922 Binary files /dev/null and b/assets/k8s-triliovault-operator/k8s-triliovault-operator-2.0.500.tgz differ diff --git a/assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz b/assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz new file mode 100644 index 000000000..89e8d7b12 Binary files /dev/null and b/assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz differ diff --git a/assets/kubecost/cost-analyzer-1.70.000.tgz b/assets/kubecost/cost-analyzer-1.70.000.tgz new file mode 100644 index 000000000..65fea3b30 Binary files /dev/null and b/assets/kubecost/cost-analyzer-1.70.000.tgz differ diff --git a/assets/logos/cloudcasa.png b/assets/logos/cloudcasa.png new file mode 100644 index 000000000..7a6660f2a Binary files /dev/null and b/assets/logos/cloudcasa.png differ diff --git a/assets/nutanix-csi-storage/nutanix-csi-storage-2.3.100.tgz b/assets/nutanix-csi-storage/nutanix-csi-storage-2.3.100.tgz new file mode 100644 index 000000000..6c7a2b491 Binary files /dev/null and b/assets/nutanix-csi-storage/nutanix-csi-storage-2.3.100.tgz differ diff --git a/assets/openebs/openebs-1.12.300.tgz 
b/assets/openebs/openebs-1.12.300.tgz new file mode 100644 index 000000000..9c6e94f23 Binary files /dev/null and b/assets/openebs/openebs-1.12.300.tgz differ diff --git a/assets/portshift-operator/portshift-operator-0.1.000.tgz b/assets/portshift-operator/portshift-operator-0.1.000.tgz new file mode 100644 index 000000000..28a9030bd Binary files /dev/null and b/assets/portshift-operator/portshift-operator-0.1.000.tgz differ diff --git a/assets/streamsets/control-agent-2.0.100.tgz b/assets/streamsets/control-agent-2.0.100.tgz new file mode 100644 index 000000000..100f13c67 Binary files /dev/null and b/assets/streamsets/control-agent-2.0.100.tgz differ diff --git a/assets/sysdig/sysdig-1.9.200.tgz b/assets/sysdig/sysdig-1.9.200.tgz new file mode 100644 index 000000000..9280f09d0 Binary files /dev/null and b/assets/sysdig/sysdig-1.9.200.tgz differ diff --git a/assets/universal-crossplane/universal-crossplane-1.2.200100.tgz b/assets/universal-crossplane/universal-crossplane-1.2.200100.tgz new file mode 100644 index 000000000..e0f4ceb70 Binary files /dev/null and b/assets/universal-crossplane/universal-crossplane-1.2.200100.tgz differ diff --git a/charts/ambassador/ambassador/6.7.1100/.helmignore b/charts/ambassador/ambassador/6.7.1100/.helmignore new file mode 100644 index 000000000..a0482efdf --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +OWNERS diff --git a/charts/ambassador/ambassador/6.7.1100/CHANGELOG.md b/charts/ambassador/ambassador/6.7.1100/CHANGELOG.md new file mode 100644 index 000000000..199e56fee --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/CHANGELOG.md @@ -0,0 +1,528 @@ +# Change Log + +This file documents all notable changes to Ambassador Helm Chart. The release +numbering uses [semantic versioning](http://semver.org). 
+ +## Next Release + +(no changes yet) + +## v6.7.11 + +- Update Ambassador API Gateway chart image to version v1.13.8: [CHANGELOG](https://github.com/emissary-ingress/emissary/blob/master/CHANGELOG.md) +- Update Ambassador Edge Stack chart image to version v1.13.8: [CHANGELOG](https://github.com/emissary-ingress/emissary/blob/master/CHANGELOG.md) +- Bugfix: remove duplicate label key in ambassador-agent deployment + +## v6.7.10 + +- Update Ambassador API Gateway chart image to version v1.13.7: [CHANGELOG](https://github.com/emissary-ingress/emissary/blob/master/CHANGELOG.md) +- Update Ambassador Edge Stack chart image to version v1.13.7: [CHANGELOG](https://github.com/emissary-ingress/emissary/blob/master/CHANGELOG.md) + +## v6.7.9 + +- Update Ambassador chart image to version 1.13.6: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + + +## v6.7.8 + +- Update Ambassador chart image to version 1.13.5: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + + +## v6.7.7 + +- Bugfix: ambassador-injector and telepresence-proxy now use the correct default image repository + +## v6.7.6 + +- Update Ambassador chart image to version 1.13.4: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Change: unless image.repository or image.fullImageOverride is explicitly set, the ambassador image used will be templated on .Values.enableAES. If AES is enabled, the chart will use docker.io/datawire/aes, otherwise it will use docker.io/datawire/ambassador. + +## v6.7.5 + +- Update Ambassador chart image to version v1.13.3: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.7.4 + +- Feature: The [Ambassador Module](https://www.getambassador.io/docs/edge-stack/latest/topics/running/ambassador/) can now be configured and managed by Helm + +## v6.7.3 + +- Update Ambassador chart image to version v1.13.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.7.2 + +- Bugfix: Don't change the Role name when running in singleNamespace mode. + +## v6.7.1 + +- Update Ambassador chart image to version v1.13.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.7.0 + +- Update Ambassador to version 1.13.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Feature: Ambassador Agent now available for API Gateway (https://app.getambassador.io) +- Feature: Add support for [pod topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) via the `topologySpreadConstraints` helm value to the Ambassador deployment. (thanks, [@lawliet89](https://github.com/lawliet89)!) +- BugFix: Add missing `ambassador_id` for resolvers. +- Change: Ambassador ClusterRoles are now aggregated under the label `rbac.getambassador.io/role-group`. The aggregated role has the same name as the previous role name (so no need to update ClusterRoleBindings).
+ +## v6.6.4 + +- Update Ambassador to version 1.12.4: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.6.3 + +- Update Ambassador to version 1.12.3: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.6.2 + +- Update Ambassador to version 1.12.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.6.1 + +- Fix metadata field in ConsulResolver +- Make resolvers available to OSS + +## v6.6.0 + +- Update Ambassador to version 1.12.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Feature: Apply Ambassador Agent deployment by default to enable Service Catalog reporting (https://app.getambassador.io) + +## v6.5.22 + +- Bugfix: Disable the cloud agent by default. The agent will be enabled in 6.6.0. +- Bugfix: Adds a check to prevent the cloud agent from being installed if AES version is less than 1.12.0 + +## v6.5.21 + +- Update Ambassador to version 1.12.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Feature: Add support for the ambassador-agent, reporting to Service Catalog (https://app.getambassador.io) +- Feature: All services are automatically instrumented with discovery annotations. + +## v6.5.20 + +- Update Ambassador to version v1.11.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.19 + +- Make all `livenessProbe` and `readinessProbe` configurations available to the values file + +## v6.5.18 + +- Update Ambassador to version v1.11.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.17 + +- Update Ambassador to version v1.11.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Bugfix: Fix Mapping definition to correctly support labels in use. + +## v6.5.16 + +- Bugfix: Ambassador CRD cleanup will now execute as expected. + +## v6.5.15 + +- Bugfix: Ambassador RBAC now includes permissions for IngressClasses. + +## v6.5.14 + +- Update for Ambassador v1.10.0 + +## v6.5.13 + +- Update for Ambassador v1.9.1 + +## v6.5.12 + +- Feature: Add ability to configure `terminationGracePeriodSeconds` for the Ambassador container +- Update for Ambassador v1.9.0 + +## v6.5.11 + +- Feature: add affinity and tolerations support for redis pods + +## v6.5.10 + +- Update Ambassador to version 1.8.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.9 + +- Update Ambassador to version 1.8.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Bugfix: The RBAC for AES now grants permission to "patch" Events.v1.core. Previously it granted "create" but not "patch". + +## v6.5.8 + +- Update Ambassador to version 1.7.4: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.7 + +- Update Ambassador to version 1.7.3: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- The BusyBox image used by `test-ready` is now configurable (thanks, [Alan Silva](https://github.com/OmegaVVeapon)!)
+ +## v6.5.6 + +- Update Ambassador to version 1.7.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Feature: Allow overriding the namespace for the release using the values file: [ambassador-chart/#122](https://github.com/datawire/ambassador-chart/pull/122) + +## v6.5.5 + +- Allow hyphens in service annotations: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.4 + +- Upgrade Ambassador to version 1.7.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.3 + +- Upgrade Ambassador to version 1.7.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.2 + +- Feature: Add support for DaemonSet/Deployment labels: [ambassador-chart/#114](https://github.com/datawire/ambassador-chart/pull/114) +- Upgrade Ambassador to version 1.6.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.1 + +- Upgrade Ambassador to version 1.6.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.5.0 + +- Upgrade Ambassador to version 1.6.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.4.10 + +- Feature: Allow specifying annotations for the license-key-secret: [ambassador-chart/#106](https://github.com/datawire/ambassador-chart/issues/106) +- Feature: Annotation for keeping the AES secret on removal: [ambassador-chart/#110](https://github.com/datawire/ambassador-chart/issues/110) +- Fix: do not mount the secret if we do not want a secret: [ambassador-chart/#103](https://github.com/datawire/ambassador-chart/issues/103) +- Internal CI refactorings. + +## v6.4.9 + +- BugFix: Cannot specify podSecurityPolicies: [ambassador-chart/#97](https://github.com/datawire/ambassador-chart/issues/97) + +## v6.4.8 + +- Upgrade Ambassador to version 1.5.5: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.4.7 + +- BugFix: Registry service is now using the proper `app.kubernetes.io/name` +- BugFix: Restore ability to set `REDIS` env vars in `env` instead of `redisEnv` +- Feature: Add `envRaw` to support supplying raw yaml for environment variables. Deprecates `redisEnv`. + +## v6.4.6 + +- Upgrade Ambassador to version 1.5.4: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- Added support for setting external IPs for the ambassador service (thanks, [Jason Smith](https://github.com/jasons42)!) + +## v6.4.5 + +- Upgrade Ambassador to version 1.5.3: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.4.4 + +- Feature flag for enabling or disabling the [`Project` registry](https://www.getambassador.io/docs/edge-stack/latest/topics/using/projects/) +- redisEnv for setting environment variables to control how Ambassador interacts with redis. See [redis environment](https://www.getambassador.io/docs/edge-stack/latest/topics/running/environment/#redis) + +## v6.4.3 + +- Upgrade Ambassador to version 1.5.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.4.2 + +- Upgrade Ambassador to version 1.5.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.4.1 + +- BugFix: The `PodSecurityPolicy` should not be created by default since it is a cluster-wide resource that should only be created once. + +If you would like to use the default `PodSecurityPolicy`, make sure to unset `security.podSecurityPolicy` in all other releases.
+ +## v6.4.0 + +- Upgrade Ambassador to version 1.5.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) +- AuthService and RateLimitService are now installed in the same namespace as Ambassador. +- Changes RBAC permissions to better support single-namespace installations and detecting getambassador.io CRDs. +- Add option to install Service Preview components (traffic-manager, traffic-agent). +- Add option to install ambassador-injector, alongside Service Preview. +- Add additional security policy configurations. + + `securityContext` has been deprecated in favor of `security` which allows you to set container and pod security contexts as well as a default `PodSecurityPolicy`. + +## v6.3.6 + +- Switch from Quay.io to DockerHub + +## v6.3.5 + +- Upgrade Ambassador to version 1.4.3: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.3.4 + +- Minor bug fixes + +## v6.3.3 + +- Add extra labels to ServiceMonitor: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.3.2 + +- Upgrade Ambassador to version 1.4.2: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.3.1 + +- Upgrade Ambassador to version 1.4.1: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.3.0 + +- Adds: Option to create a ServiceMonitor for scraping via Prometheus Operator + +## v6.2.5 + +- Upgrade Ambassador to version 1.4.0: [CHANGELOG](https://github.com/datawire/ambassador/blob/master/CHANGELOG.md) + +## v6.2.4 + +- Fix typing so that Helm3 doesn't complain (thanks, [Fabrice Rabaute](https://github.com/jfrabaute)!) + +## v6.2.3 + +- Upgrade Ambassador to version 1.3.2. +- Use explicit types for things like ports, so that things like `helm .. --set service.ports[0].port=80` will be integers instead of ending up as strings + +## v6.2.2 + +- Upgrade Ambassador to version 1.3.1. +- Remove unnecessary `version` field from CRDs. +- Add static label to AES resources, to better support `edgectl install` + +## v6.2.1 + +- Upgrade Ambassador to version 1.3.0. + +## v6.2.0 + +- Add option to not create DevPortal routes + +## v6.1.5 + +- Upgrade Ambassador to version 1.2.2. + +## v6.1.4 + +- Upgrade from Ambassador 1.2.0 to 1.2.1. + +## v6.1.3 + +- Upgrade from Ambassador 1.1.1 to 1.2.0. + +## v6.1.2 + +- Upgrade from Ambassador 1.1.0 to 1.1.1. + +## v6.1.1 + +Minor Improvements: + +- Adds: Option to override the name of the RBAC resources + +## v6.1.0 + +Minor improvements including: + +- Adds: Option to set `restartPolicy` +- Adds: Option to give the AES license key secret a custom name +- Fixes: Assumption that the AES will be installed only from the `datawire/aes` repository. The `enableAES` flag now configures whether the AES is installed. +- Clarification on how to install OSS + +## v6.0.0 + +Introduces Ambassador Edge Stack being installed by default. + +### Breaking changes + +Ambassador Pro support has been removed in 6.0.0. Please upgrade to the Ambassador Edge Stack. + +## v5.0.0 + +### Breaking changes + +**Note** If upgrading an existing helm 2 installation no action is needed, previously installed CRDs will not be modified. + +- Helm 3 support for CRDs was added. Specifically, the CRD templates were moved to non-templated files in the `/crds` directory, and to keep Helm 2 support they are globbed from there by `/templates/crds.yaml`.
However, because Helm 3 CRDs are not templated, the labels for new installations have necessarily changed. + +## v4.0.0 + +### Breaking Changes + +- Introduces the performance tuned and certified build of open source Ambassador, Ambassador core +- The license key is now stored and read from a Kubernetes secret by default +- Added `.Values.pro.licenseKey.secret.enabled` and `.Values.pro.licenseKey.secret.create` fields to allow multiple releases in the same namespace to use the same license key secret. + +### Minor Changes + +- Introduces the ability to configure resource limits for both Ambassador Pro and its redis instance +- Introduces the ability to configure additional `AuthService` options (see [AuthService documentation](https://www.getambassador.io/reference/services/auth-service/)) +- The ambassador-pro-auth `AuthService` and ambassador-pro-ratelimit `RateLimitService` are now created as CRDs when `.Values.crds.enabled: true` +- Fixed misnamed selector for redis instance that failed in an edge case +- Exposes annotations for redis deployment and service + +## v3.0.0 + +### Breaking Changes + +- The default annotation has been removed. The service port will be set dynamically to 8080 or 8443 for http and https respectively. +- `service.http`, `service.https`, and `additionalTCPPort` have been replaced with `service.ports`. +- `rbac.namespaced` has been removed. Use `scope.singleNamespace` instead. + +### Minor Changes + +- Ambassador Pro will pick up when `AMBASSADOR_ID` is set in `.Values.env` [[#15025]](https://github.com/helm/charts/issues/15025). +- `{{release name}}-admins` has been renamed to `{{release name}}-admin` to match YAML install templates +- RBAC configuration has been updated to allow for CRD use when `scope.singleNamespace: true`. [[ambassador/#1576]](https://github.com/datawire/ambassador/issues/1576) +- RBAC configuration now allows for multiple Ambassadors to use CRDs. Set `crds.enabled` in releases that expect CRDs [[ambassador/#1679]](https://github.com/datawire/ambassador/issues/1679) + +## v2.6.0 + +### Minor Changes + +- Add ambassador CRDs! +- Update ambassador to 0.70.0 + +## v2.5.1 + +### Minor Changes + +- Update ambassador to 0.61.1 + +## v2.5.0 + +### Minor Changes + +- Add support for autoscaling using HPA, see `autoscaling` values. + +## v2.4.1 + +### Minor Changes + +- Update ambassador to 0.61.0 + +## v2.4.0 + +### Minor Changes + +- Allow configuring `hostNetwork` and `dnsPolicy` + +## v2.3.1 + +### Minor Changes + +- Adds HOST_IP environment variable + +## v2.3.0 + +### Minor Changes + +- Adds support for init containers using `initContainers` and pod labels `podLabels` + +## v2.2.5 + +### Minor Changes + +- Update ambassador to 0.60.3 + +## v2.2.4 + +### Minor Changes + +- Add support for Ambassador PRO [see readme](https://github.com/helm/charts/blob/master/stable/ambassador/README.md#ambassador-pro) + +## v2.2.3 + +### Minor Changes + +- Update ambassador to 0.60.2 + +## v2.2.2 + +### Minor Changes + +- Update ambassador to 0.60.1 + +## v2.2.1 + +### Minor Changes + +- Fix RBAC for ambassador 0.60.0 + +## v2.2.0 + +### Minor Changes + +- Update ambassador to 0.60.0 + +## v2.1.0 + +### Minor Changes + +- Added `scope.singleNamespace` for configuring ambassador to run in single namespace + +## v2.0.2 + +### Minor Changes + +- Update ambassador to 0.53.1 + +## v2.0.1 + +### Minor Changes + +- Update ambassador to 0.52.0 + +## v2.0.0 + +### Major Changes + +- Removed `ambassador.id` and `namespace.single` in favor of setting environment variables.
+ +## v1.1.5 + +### Minor Changes + +- Update ambassador to 0.50.3 + +## v1.1.4 + +### Minor Changes + +- support targetPort specification + +## v1.1.3 + +### Minor Changes + +- Update ambassador to 0.50.2 + +## v1.1.2 + +### Minor Changes + +- Add additional chart maintainer + +## v1.1.1 + +### Minor Changes + +- Default replicas -> 3 + +## v1.1.0 + +### Minor Changes + +- Allow RBAC to be namespaced (`rbac.namespaced`) + +## v1.0.0 + +### Major Changes + +- First release of Ambassador Helm Chart in helm/charts +- For migration see [Migrating from datawire/ambassador chart](https://github.com/helm/charts/tree/master/stable/ambassador#migrating-from-datawireambassador-chart-chart-version-0400-or-0500) diff --git a/charts/ambassador/ambassador/6.7.1100/CONTRIBUTING.md b/charts/ambassador/ambassador/6.7.1100/CONTRIBUTING.md new file mode 100644 index 000000000..443250b7a --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing to the Ambassador Helm Chart + +This Helm chart is used to install The Ambassador Edge Stack (AES) and is +maintained by Datawire. + +## Developing + +All work on the helm chart should be done in a separate branch off `master` and +contributed with a Pull Request targeting `master`. + +**Note**: All updates to the chart require you update the `version` in +`Chart.yaml`. + +## Testing + +The `ci/` directory contains scripts that will be run on PRs to `master`. + +- `ci/run_tests.sh` will run the tests of the chart. + +## Releasing + +Releasing a new chart is done by pushing a tag to `master`. Travis will then +run the tests and push the chart to `https://getambassador.io/helm`. diff --git a/charts/ambassador/ambassador/6.7.1100/Chart.yaml b/charts/ambassador/ambassador/6.7.1100/Chart.yaml new file mode 100644 index 000000000..a1cc4f3db --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Ambassador Edge Stack + catalog.cattle.io/release-name: ambassador +apiVersion: v1 +appVersion: 1.13.8 +description: A Helm chart for Datawire Ambassador +home: https://www.getambassador.io/ +icon: https://www.getambassador.io/images/logo.png +keywords: +- api gateway +- ambassador +- datawire +- envoy +maintainers: +- email: markus@maga.se + name: flydiverny +- email: flynn@datawire.io + name: kflynn +- email: nkrause@datawire.io + name: nbkrause +- email: lukeshu@datawire.io + name: lukeshu +name: ambassador +sources: +- https://github.com/datawire/ambassador +- https://github.com/prometheus/statsd_exporter +version: 6.7.1100 diff --git a/charts/ambassador/ambassador/6.7.1100/Makefile b/charts/ambassador/ambassador/6.7.1100/Makefile new file mode 100644 index 000000000..3271ecc11 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/Makefile @@ -0,0 +1,37 @@ +HELM_TEST_IMAGE = quay.io/helmpack/chart-testing:v3.0.0-rc.1 +K3D_CLUSTER_NAME = helm-chart-test-cluster +CHART_DIR := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +CHART_KUBECONFIG := /tmp/kubeconfig/k3dconfig +CT_EXEC = docker run --rm -v $(CHART_KUBECONFIG):/root/.kube/config -v $(CHART_DIR):/charts --network host $(HELM_TEST_IMAGE) ct +K3D_EXEC := KUBECONFIG=$(CHART_KUBECONFIG) k3d + +test-chart: lint-chart preflight-chart-test chart-create-cluster + $(CT_EXEC) install --config /charts/ct.yaml && \ + $(MAKE) chart-delete-cluster +.PHONY: test-chart + +lint-chart: preflight-kubeconfig + $(CT_EXEC) lint --config /charts/ct.yaml +.PHONY: 
lint-chart + +preflight-chart-test: preflight-kubeconfig + # check if k3d is installed + @if ! command -v k3d 2> /dev/null ; then \ + printf 'k3d not installed, plz do that'; \ + false; \ + fi +.PHONY: preflight-chart-test + +preflight-kubeconfig: + mkdir -p `dirname $(CHART_KUBECONFIG)` + touch $(CHART_KUBECONFIG) +.PHONY: preflight-kubeconfig + +chart-create-cluster: preflight-kubeconfig + $(MAKE) chart-delete-cluster || true + $(K3D_EXEC) cluster create $(K3D_CLUSTER_NAME) --k3s-server-arg "--no-deploy=traefik" +.PHONY: chart-create-cluster + +chart-delete-cluster: + $(K3D_EXEC) cluster delete $(K3D_CLUSTER_NAME) +.PHONY: chart-delete-cluster diff --git a/charts/ambassador/ambassador/6.7.1100/README.md b/charts/ambassador/ambassador/6.7.1100/README.md new file mode 100644 index 000000000..01ea965bc --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/README.md @@ -0,0 +1,478 @@ +# Ambassador + +The Ambassador Edge Stack is a self-service, comprehensive edge stack that is Kubernetes-native and built on [Envoy Proxy](https://www.envoyproxy.io/). + +## TL;DR; + +```console +$ helm repo add datawire https://getambassador.io +$ helm install ambassador datawire/ambassador +``` + +## Introduction + +This chart bootstraps an [Ambassador](https://www.getambassador.io) deployment on +a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.11+ + +## Add this Helm repository to your Helm client + +```console +helm repo add datawire https://getambassador.io +``` + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ kubectl create namespace ambassador +$ helm install my-release datawire/ambassador -n ambassador +``` + +The command deploys Ambassador Edge Stack in the ambassador namespace on the Kubernetes cluster in the default configuration. + +It is recommended to use the ambassador namespace for easy upgrades. + +The [configuration](#configuration) section lists the parameters that can be configured during installation. + +### Ambassador Edge Stack Installation + +This chart defaults to installing The Ambassador Edge Stack with all of its configuration objects. + +- A Redis instance +- `AuthService` resource for enabling authentication +- `RateLimitService` resource for enabling rate limiting +- `Mapping`s for internal request routing + +If installing alongside another deployment of Ambassador, some of these resources can cause configuration errors since only one `AuthService` or `RateLimitService` can be configured at a time. + +If you already have one of these resources configured in your cluster, please see the [configuration](#configuration) section below for information on how to disable them in the chart. + +### Ambassador OSS Installation + +This chart can still be used to install Ambassador OSS. + +To install OSS, change the `image` to use the OSS image and set `enableAES: false` to skip the install of any AES resources. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm uninstall my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Changelog + +Notable chart changes are listed in the [CHANGELOG](./CHANGELOG.md) + +## Configuration + +The following tables lists the configurable parameters of the Ambassador chart and their default values. 
+ +| Parameter | Description | Default | +|----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| +| `nameOverride` | Override the generated chart name. Defaults to .Chart.Name. | | +| `fullnameOverride` | Override the generated release name. Defaults to .Release.Name. | | +| `namespaceOverride` | Override the generated release namespace. Defaults to .Release.Namespace. | | +| `adminService.create` | If `true`, create a service for Ambassador's admin UI | `true` | +| `adminService.nodePort` | If explicit NodePort for admin service is required | `true` | +| `adminService.type` | Ambassador's admin service type to be used | `ClusterIP` | +| `adminService.annotations` | Annotations to apply to Ambassador admin service | `{}` | +| `adminService.loadBalancerIP` | IP address to assign (if cloud provider supports it) | `""` | +| `adminService.loadBalancerSourceRanges` | Passed to cloud provider load balancer if created (e.g: AWS ELB) | None | +| `ambassadorConfig` | Config thats mounted to `/ambassador/ambassador-config` | `""` | +| `crds.enabled` | If `true`, enables CRD resources for the installation. | `true` | +| `crds.create` | If `true`, Creates CRD resources | `true` | +| `crds.keep` | If `true`, if the ambassador CRDs should be kept when the chart is deleted | `true` | +| `daemonSet` | If `true`, Create a DaemonSet. By default Deployment controller will be created | `false` | +| `test.enabled` | If `true`, Create test Pod to verify the Ambassador service works correctly (Only created on `helm test`) | `true` | +| `test.image` | Image to use for the test Pod | `busybox` | +| `hostNetwork` | If `true`, uses the host network, useful for on-premise setups | `false` | +| `dnsPolicy` | Dns policy, when hostNetwork set to ClusterFirstWithHostNet | `ClusterFirst` | +| `env` | Any additional environment variables for ambassador pods | `{}` | +| `envRaw` | Additional environment variables in raw YAML format | `{}` | +| `image.pullPolicy` | Ambassador image pull policy | `IfNotPresent` | +| `image.repository` | Ambassador image | `docker.io/datawire/aes` | +| `image.tag` | Ambassador image tag | `1.13.8` | +| `imagePullSecrets` | Image pull secrets | `[]` | +| `namespace.name` | Set the `AMBASSADOR_NAMESPACE` environment variable | `metadata.namespace` | +| `scope.singleNamespace` | Set the `AMBASSADOR_SINGLE_NAMESPACE` environment variable and create namespaced RBAC if `rbac.enabled: true` | `false` | +| `podAnnotations` | Additional annotations for ambassador pods | `{}` | +| `deploymentAnnotations` | Additional annotations for ambassador DaemonSet/Deployment | `{}` | +| `podLabels` | Additional labels for ambassador pods | | +| `deploymentLabels` | Additional labels for ambassador DaemonSet/Deployment | | +| `affinity` | Affinity for ambassador pods | `{}` | +| `topologySpreadConstraints` | Topology Spread Constraints for Ambassador pods. Stable since 1.19. 
| `[]` | +| `nodeSelector` | NodeSelector for ambassador pods | `{}` | +| `priorityClassName` | The name of the priorityClass for the ambassador DaemonSet/Deployment | `""` | +| `rbac.create` | If `true`, create and use RBAC resources | `true` | +| `rbac.podSecurityPolicies` | pod security polices to bind to | | +| `rbac.nameOverride` | Overrides the default name of the RBAC resources | `` | +| `replicaCount` | Number of Ambassador replicas | `3` | +| `resources` | CPU/memory resource requests/limits | `{ "limits":{"cpu":"1000m","memory":"600Mi"},"requests":{"cpu":"200m","memory":"300Mi"}}` | +| `securityContext` | Set security context for pod | `{ "runAsUser": "8888" }` | +| `security.podSecurityContext` | Set the security context for the Ambassador pod | `{ "runAsUser": "8888" }` | +| `security.containerSecurityContext` | Set the security context for the Ambassador container | `{ "allowPrivilegeEscalation": false }` | +| `security.podSecurityPolicy` | Create a PodSecurityPolicy to be used for the pod. | `{}` | +| `restartPolicy` | Set the `restartPolicy` for pods | `` | +| `terminationGracePeriodSeconds` | Set the `terminationGracePeriodSeconds` for the pod. Defaults to 30 if unset. | `` | +| `initContainers` | Containers used to initialize context for pods | `[]` | +| `sidecarContainers` | Containers that share the pod context | `[]` | +| `livenessProbe.initialDelaySeconds` | Initial delay (s) for Ambassador pod's liveness probe | `30` | +| `livenessProbe.periodSeconds` | Probe period (s) for Ambassador pod's liveness probe | `3` | +| `livenessProbe.failureThreshold` | Failure threshold for Ambassador pod's liveness probe | `3` | +| `readinessProbe.initialDelaySeconds` | Initial delay (s) for Ambassador pod's readiness probe | `30` | +| `readinessProbe.periodSeconds` | Probe period (s) for Ambassador pod's readiness probe | `3` | +| `readinessProbe.failureThreshold` | Failure threshold for Ambassador pod's readiness probe | `3` | +| `service.annotations` | Annotations to apply to Ambassador service | `""` | +| `service.externalTrafficPolicy` | Sets the external traffic policy for the service | `""` | +| `service.nameOverride` | Sets the name of the service | `ambassador.fullname` | +| `service.ports` | List of ports Ambassador is listening on | `[{"name": "http","port": 80,"targetPort": 8080},{"name": "https","port": 443,"targetPort": 8443}]` | +| `service.loadBalancerIP` | IP address to assign (if cloud provider supports it) | `""` | +| `service.loadBalancerSourceRanges` | Passed to cloud provider load balancer if created (e.g: AWS ELB) | None | +| `service.sessionAffinity` | Sets the session affinity policy for the service | `""` | +| `service.sessionAffinityConfig` | Sets the session affinity config for the service | `""` | +| `service.type` | Service type to be used | `LoadBalancer` | +| `service.externalIPs` | External IPs to route to the ambassador service | `[]` | +| `serviceAccount.create` | If `true`, create a new service account | `true` | +| `serviceAccount.name` | Service account to be used | `ambassador` | +| `volumeMounts` | Volume mounts for the ambassador service | `[]` | +| `volumes` | Volumes for the ambassador service | `[]` | +| `enableAES` | Create the [AES configuration objects](#ambassador-edge-stack-installation) | `true` | +| `createDevPortalMappings` | Expose the dev portal on `/docs/` and `/documentation/` | `true` | +| `licenseKey.value` | Ambassador Edge Stack license. Empty will install in evaluation mode. 
| `` | +| `licenseKey.createSecret` | Set to `false` if installing multiple Ambassador Edge Stacks in a namespace. | `true` | +| `licenseKey.secretName` | Name of the secret to store Ambassador license key in. | `` | +| `licenseKey.annotations` | Annotations to attach to the license-key-secret. | {} | +| `redisURL` | URL of redis instance not created by the release | `""` | +| `redisEnv` | (**DEPRECATED:** Use `envRaw`) Set env vars that control how Ambassador interacts with redis. | `""` | +| `redis.create` | Create a basic redis instance with default configurations | `true` | +| `redis.annotations` | Annotations for the redis service and deployment | `""` | +| `redis.resources` | Resource requests for the redis instance | `""` | +| `redis.nodeSelector` | NodeSelector for redis pods | `{}` | +| `redis.affinity` | Affinity for redis pods | `{}` | +| `redis.tolerations` | Tolerations for redis pods | `{}` | +| `authService.create` | Create the `AuthService` CRD for Ambassador Edge Stack | `true` | +| `authService.optional_configurations` | Config options for the `AuthService` CRD | `""` | +| `rateLimit.create` | Create the `RateLimit` CRD for Ambassador Edge Stack | `true` | +| `registry.create` | Create the `Project` registry. | `false` | +| `autoscaling.enabled` | If true, creates Horizontal Pod Autoscaler | `false` | +| `autoscaling.minReplicas` | If autoscaling enabled, this field sets minimum replica count | `2` | +| `autoscaling.maxReplicas` | If autoscaling enabled, this field sets maximum replica count | `5` | +| `autoscaling.metrics` | If autoscaling enabled, configure hpa metrics | | +| `podDisruptionBudget` | Pod disruption budget rules | `{}` | +| `resolvers.endpoint.create` | Create a KubernetesEndpointResolver | `false` | +| `resolvers.endpoint.name` | If creating a KubernetesEndpointResolver, the resolver name | `endpoint` | +| `resolvers.consul.create` | Create a ConsulResolver | `false` | +| `resolvers.consul.name` | If creating a ConsulResolver, the resolver name | `consul-dc1` | +| `resolvers.consul.spec` | If creating a ConsulResolver, additional configuration | `{}` | +| `module` | Configure and manage the Ambassador Module from the Chart | `{}` | +| `prometheusExporter.enabled` | DEPRECATED: Prometheus exporter side-car enabled | `false` | +| `prometheusExporter.pullPolicy` | DEPRECATED: Image pull policy | `IfNotPresent` | +| `prometheusExporter.repository` | DEPRECATED: Prometheus exporter image | `prom/statsd-exporter` | +| `prometheusExporter.tag` | DEPRECATED: Prometheus exporter image | `v0.8.1` | +| `prometheusExporter.resources` | DEPRECATED: CPU/memory resource requests/limits | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor object (`adminService.create` should be set to `true`) | `false` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `metrics.serviceMonitor.selector` | Label Selector for Prometheus to find ServiceMonitors | `{ prometheus: kube-prometheus }` | +| `servicePreview.enabled` | If true, install Service Preview components: traffic-manager & traffic-agent (`enableAES` needs to also be set to `true`) | `false` | +| `servicePreview.trafficManager.image.repository` | Ambassador Traffic-manager image | Same value as `image.repository` | +| `servicePreview.trafficManager.image.tag` | Ambassador Traffic-manager image tag | Same value as `image.tag` | +| `servicePreview.trafficManager.serviceAccountName` | Traffic-manager Service Account to be used | `traffic-manager` | +| `servicePreview.trafficAgent.image.repository` | Ambassador Traffic-agent image | Same value as `image.repository` | +| `servicePreview.trafficAgent.image.tag` | Ambassador Traffic-agent image tag | Same value as `image.tag` | +| `servicePreview.trafficAgent.injector.enabled` | If true, install the ambassador-injector | `true` | +| `servicePreview.trafficAgent.injector.crtPEM` | TLS certificate for the Common Name of ..svc | Auto-generated, valid for 365 days | +| `servicePreview.trafficAgent.injector.keyPEM` | TLS private key for the Common Name of ..svc | Auto-generated, valid for 365 days | +| `servicePreview.trafficAgent.port` | Traffic-agent listening port number when injected with ambassador-injector | `9900` | +| `servicePreview.trafficAgent.serviceAccountName` | Traffic-agent Service Account to be used | `traffic-agent` | +| `servicePreview.trafficAgent.singleNamespace` | If `true`, installs the traffic-agent ServiceAccount and Role in the current installation namespace; Otherwise uses a global ClusterRole applied to every ServiceAccount | `true` | +| `agent.enabled` | If `true`, installs the ambassador-agent Deployment, ServiceAccount and ClusterRole in the ambassador namespace | `true` | +| `agent.cloudConnectionToken` | API token for reporting snapshots to the [Service Catalog](https://app.getambassador.io/cloud/catalog/); If empty, agent will not report snapshots | `""` | +| `agent.rpcAddress` | Address of the ambassador Service Catalog rpc server. | `https://app.getambassador.io/` | +| `agent.image.repository` | Image repository for the ambassador-agent deployment. Defaults to value of `image.repository` | Same value as `image.repository` | +| `agent.image.tag` | Image tag for the ambassador-agent deployment. Defaults to value of `image.tag` | Same value as `image.tag` | + +**NOTE:** Make sure the configured `service.http.targetPort` and `service.https.targetPort` ports match your [Ambassador Module's](https://www.getambassador.io/reference/modules/#the-ambassador-module) `service_port` and `redirect_cleartext_from` configurations. + +### The Ambassador Edge Stack + +The Ambassador Edge Stack provides a comprehensive, self-service edge stack in +the Kubernetes cluster with a decentralized deployment model and a declarative +paradigm. + +By default, this chart will install the latest image of The Ambassador Edge +Stack which will replace your existing deployment of Ambassador with no changes +to functionality. + +### CRDs + +This helm chart includes the creation of the core CRDs Ambassador uses for +configuration. + +The `crds` flags (Helm 2 only) let you configure how a release manages crds. +- `crds.create` Can only be set on your first/master Ambassador release. +- `crds.enabled` Should be set on all releases using Ambassador CRDs +- `crds.keep` Configures if the CRDs are deleted when the master release is + purged. This value is only checked for the master release and can be set to + any value on secondary releases. + +### Security + +Ambassador takes security very seriously. For this reason, the YAML installation will default with a couple of basic security policies in place. + +The `security` field of the `values.yaml` file configures these default policies and replaces the `securityContext` field used earlier.
+ +The defaults will configure the pod to run as a non-root user and prohibit privilege escalation and outline a `PodSecurityPolicy` to ensure these conditions are met. + + + +```yaml +security: + # Security Context for all containers in the pod. + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podsecuritycontext-v1-core + podSecurityContext: + runAsUser: 8888 + # Security Context for the Ambassador container specifically + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core + containerSecurityContext: + allowPrivilegeEscalation: false + # A basic PodSecurityPolicy to ensure Ambassador is running with appropriate security permissions + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + # + # A set of reasonable defaults is outlined below. This is not created by default as it should only + # be created by a one Release. If you want to use the PodSecurityPolicy in the chart, create it in + # the "master" Release and then leave it unset in all others. Set the `rbac.podSecurityPolicies` + # in all non-"master" Releases. + podSecurityPolicy: {} + # # Add AppArmor and Seccomp annotations + # # https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + # annotations: + # spec: + # seLinux: + # rule: RunAsAny + # supplementalGroups: + # rule: 'MustRunAs' + # ranges: + # # Forbid adding the root group. + # - min: 1 + # max: 65535 + # fsGroup: + # rule: 'MustRunAs' + # ranges: + # # Forbid adding the root group. + # - min: 1 + # max: 65535 + # privileged: false + # allowPrivilegeEscalation: false + # runAsUser: + # rule: MustRunAsNonRoot +``` + +### Annotations + +Ambassador is configured using Kubernetes Custom Resource Definitions (CRDs). If you are unable to use CRDs, Ambassador can also be configured using annotations on services. The `service.annotations` section of the values file contains commented out examples of [Ambassador Module](https://www.getambassador.io/reference/core/ambassador) and a global [TLSContext](https://www.getambassador.io/reference/core/tls) configurations which are typically created in the Ambassador service. + +If you intend to use `service.annotations`, remember to include the `getambassador.io/config` annotation key as above. + +### Prometheus Metrics + +Using the Prometheus Exporter has been deprecated and is no longer recommended. You can now use `metrics.serviceMonitor.enabled` to create a `ServiceMonitor` from the chart if the [Prometheus Operator](https://github.com/coreos/prometheus-operator) has been installed on your cluster. + +Please see Ambassador's [monitoring with Prometheus](https://www.getambassador.io/user-guide/monitoring/) docs for more information on using the `/metrics` endpoint for metrics collection. + +### Specifying Values + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install --wait my-release \ + --set adminService.type=NodePort \ + datawire/ambassador +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install --wait my-release -f values.yaml datawire/ambassador +``` + +--- + +# Upgrading + +## To 6.0.0 + +Introduces Ambassador Edge Stack being installed by default. + +### Breaking changes + +Ambassador Pro support has been removed in 6.0.0. Please [upgrade to the Ambassador Edge Stack](https://www.getambassador.io/user-guide/helm). 
+ +## To 5.0.0 + +### Breaking changes + +**Note** If upgrading an existing helm 2 installation no action is needed, previously installed CRDs will not be modified. + +- Helm 3 support for CRDs was added. Specifically, the CRD templates were moved to non-templated files in the `/crds` directory, and to keep Helm 2 support they are globbed from there by `/templates/crds.yaml`. However, because Helm 3 CRDs are not templated, the labels for new installations have necessarily changed. + +## To 4.0.0 + +The 4.0.0 chart contains a number of changes to the way Ambassador Pro is installed. + +- Introduces the performance tuned and certified build of open source Ambassador, Ambassador core +- The license key is now stored and read from a Kubernetes secret by default +- Added `.Values.pro.licenseKey.secret.enabled` and `.Values.pro.licenseKey.secret.create` fields to allow multiple releases in the same namespace to use the same license key secret. +- Introduces the ability to configure resource limits for both Ambassador Pro and its redis instance +- Introduces the ability to configure additional `AuthService` options (see [AuthService documentation](https://www.getambassador.io/reference/services/auth-service/)) +- The ambassador-pro-auth `AuthService` and ambassador-pro-ratelimit `RateLimitService` are now created as CRDs when `.Values.crds.enabled: true` +- Fixed misnamed selector for redis instance that failed in an edge case +- Exposes annotations for redis deployment and service + +### Breaking changes + +The value of `.Values.pro.image.tag` has been shortened to assume `amb-sidecar` (and `amb-core` for Ambassador core) +`values.yaml` +```diff +<3.0.0> + image: + repository: quay.io/datawire/ambassador_pro +- tag: amb-sidecar-0.6.0 + +<4.0.0+> + image: + repository: quay.io/datawire/ambassador_pro ++ tag: 0.7.0 +``` + +Method for creating a Kubernetes secret to hold the license key has been changed + +`values.yaml` +```diff +<3.0.0> +- secret: false +<4.0.0> ++ secret: ++ enabled: true ++ create: true +``` + +## To 3.0.0 + +### Service Ports + +The way ports are assigned has been changed for a more dynamic method. + +Now, instead of setting the port assignments for only the http and https, any port can be opened on the load balancer using a list like you would in a standard Kubernetes YAML manifest. + +`pre-3.0.0` +```yaml +service: + http: + enabled: true + port: 80 + targetPort: 8080 + https: + enabled: true + port: 443 + targetPort: 8443 +``` + +`3.0.0` +```yaml +service: + ports: + - name: http + port: 80 + targetPort: 8080 + - name: https + port: 443 + targetPort: 8443 +``` + +This change has also replaced the `.additionalTCPPorts` configuration. Additional TCP ports can be created the same as the http and https ports above. + +### Annotations and `service_port` + +The below Ambassador `Module` annotation is no longer being applied by default. + +```yaml +getambassador.io/config: | + --- + apiVersion: ambassador/v1 + kind: Module + name: ambassador + config: + service_port: 8080 +``` +This was causing confusion with the `service_port` being hard-coded when enabling TLS termination in Ambassador. + +Ambassador has been listening on port 8080 for HTTP and 8443 for HTTPS by default since version `0.60.0` (chart version 2.2.0). + +### RBAC and CRDs + +A `ClusterRole` and `ClusterRoleBinding` named `{{release name}}-crd` will be created to watch for the Ambassador Custom Resource Definitions.
This will be created regardless of the value of `scope.singleNamespace` since CRDs are created at the cluster scope. + +`rbac.namespaced` has been removed. For namespaced RBAC, set `scope.singleNamespace: true` and `rbac.enabled: true`. + +`crds.enabled` will indicate that you are using CRDs and will create the rbac resources regardless of the value of `crds.create`. This allows for multiple deployments to use the CRDs. + +## To 2.0.0 + +### Ambassador ID + +ambassador.id has been removed in favor of setting it via an environment variable in `env`. `AMBASSADOR_ID` defaults to `default` if not set in the environment. This is mainly used for [running multiple Ambassadors](https://www.getambassador.io/reference/running#ambassador_id) in the same cluster. + +| Parameter | Env variables | +| --------------- | --------------- | +| `ambassador.id` | `AMBASSADOR_ID` | + +## Migrating from `datawire/ambassador` chart (chart version 0.40.0 or 0.50.0) + +The chart now runs ambassador as non-root by default, so you might need to update your ambassador module config to match this. + +### Timings + +Timings values have been removed in favor of setting the env variables using `env` + +| Parameter | Env variables | +| ----------------- | -------------------------- | +| `timing.restart` | `AMBASSADOR_RESTART_TIME` | +| `timing.drain` | `AMBASSADOR_DRAIN_TIME` | +| `timing.shutdown` | `AMBASSADOR_SHUTDOWN_TIME` | + +### Single namespace + +| Parameter | Env variables | +| ------------------ | ----------------------------- | +| `namespace.single` | `AMBASSADOR_SINGLE_NAMESPACE` | + +### Renamed values + +Service ports values have changed names and target ports have new defaults. + +| Previous parameter | New parameter | New default value | +| --------------------------- | -------------------------- | ----------------- | +| `service.enableHttp` | `service.http.enabled` | | +| `service.httpPort` | `service.http.port` | | +| `service.httpNodePort` | `service.http.nodePort` | | +| `service.targetPorts.http` | `service.http.targetPort` | `8080` | +| `service.enableHttps` | `service.https.enabled` | | +| `service.httpsPort` | `service.https.port` | | +| `service.httpsNodePort` | `service.https.nodePort` | | +| `service.targetPorts.https` | `service.https.targetPort` | `8443` | + +### Exporter sidecar + +Prior to version `0.50.0`, ambassador used socat and required a sidecar to export statsd metrics. In `0.50.0` ambassador no longer uses socat and doesn't need a sidecar anymore to export its statsd metrics. Statsd metrics are disabled by default and can be enabled by setting the environment variable `STATSD_ENABLED`; this will (in 0.50) send metrics to a service named `statsd-sink`. If you want to send them to another service or namespace, set `STATSD_HOST`. + +If you are using Prometheus, the chart allows you to enable a sidecar which can export to Prometheus; see the `prometheusExporter` values.
diff --git a/charts/ambassador/ambassador/6.7.1100/RELEASE.tpl b/charts/ambassador/ambassador/6.7.1100/RELEASE.tpl new file mode 100644 index 000000000..d00d6b2f2 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/RELEASE.tpl @@ -0,0 +1,8 @@ +## :tada: Ambassador Chart $CHART_VERSION :tada: + +Upgrade Ambassador - https://www.getambassador.io/reference/upgrading#helm.html +View changelog - https://github.com/datawire/ambassador/blob/master/charts/ambassador/CHANGELOG.md + +--- + + diff --git a/charts/ambassador/ambassador/6.7.1100/RELEASE_TITLE.tpl b/charts/ambassador/ambassador/6.7.1100/RELEASE_TITLE.tpl new file mode 100644 index 000000000..7aab5973c --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/RELEASE_TITLE.tpl @@ -0,0 +1 @@ +Ambassador Chart $CHART_VERSION diff --git a/charts/ambassador/ambassador/6.7.1100/app-readme.md b/charts/ambassador/ambassador/6.7.1100/app-readme.md new file mode 100644 index 000000000..d2ef7356e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/app-readme.md @@ -0,0 +1,13 @@ +# Ambassador Edge Stack and Emissary Ingress Chart + +[Ambassador Edge Stack](https://www.getambassador.io/products/edge-stack/) and its open source CNCF counterpart [Emissary-Ingress](https://www.getambassador.io/products/api-gateway/) are Kubernetes native, high-performance Ingress controllers designed with GitOps workflows and developer experience in mind. The Edge Stack allows users to manage [Authentication](https://www.getambassador.io/docs/edge-stack/latest/topics/using/filters/), [Rate Limits](https://www.getambassador.io/docs/edge-stack/latest/topics/using/rate-limits/rate-limits/), [TLS](https://www.getambassador.io/docs/edge-stack/latest/topics/running/tls/) and more with easy-to-use resources for [managing your APIs](https://www.getambassador.io/docs/edge-stack/latest/topics/using/intro-mappings/). + +## Service Catalog + +The default installation of Ambassador Edge Stack includes the deployment needed to get started with [Service Catalog](https://www.getambassador.io/products/service-catalog/) and the [Developer Control Plane](https://www.getambassador.io/developer-control-plane/). Simply generate your [Cloud Token](https://www.getambassador.io/docs/cloud/latest/service-catalog/quick-start/#1-connect-your-cluster-to-ambassador-cloud) and add it in the Service Catalog section as you're setting up the chart. + +## More Info + +Visit the [Quick Start](https://www.getambassador.io/docs/edge-stack/latest/tutorials/getting-started/) page for more instructions, or check out our [documentation](https://www.getambassador.io/docs/edge-stack). For any questions, or to join the community, visit our [Slack](https://a8r.io/slack) and say hi! + +* Ambassador recommends a Kubernetes version of 1.16 or higher. diff --git a/charts/ambassador/ambassador/6.7.1100/ci/01-psp-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/01-psp-values.yaml new file mode 100644 index 000000000..27152824e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/01-psp-values.yaml @@ -0,0 +1,40 @@ +security: + # Security Context for all containers in the pod. 
+ # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podsecuritycontext-v1-core + podSecurityContext: + runAsUser: 8888 + # Security Context for the Ambassador container specifically + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core + containerSecurityContext: + allowPrivilegeEscalation: false + # A basic PodSecurityPolicy to ensure Ambassador is running with appropriate security permissions + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + # + # A set of reasonable defaults is outlined below. This is not created by default as it should only + # be created by a one Release. If you want to use the PodSecurityPolicy in the chart, create it in + # the "master" Release and then leave it unset in all others. Set the `rbac.podSecurityPolicies` + # in all non-"master" Releases. + podSecurityPolicy: + # Add AppArmor and Seccomp annotations + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + spec: + seLinux: + rule: RunAsAny + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + privileged: false + allowPrivilegeEscalation: false + runAsUser: + rule: MustRunAsNonRoot diff --git a/charts/ambassador/ambassador/6.7.1100/ci/02-oss-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/02-oss-values.yaml new file mode 100644 index 000000000..4fb9ff60c --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/02-oss-values.yaml @@ -0,0 +1,8 @@ +# install the Ambassador API Gateway +image: + pullPolicy: IfNotPresent + +enableAES: false + +deploymentStrategy: + type: Recreate diff --git a/charts/ambassador/ambassador/6.7.1100/ci/05-auth-disabled-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/05-auth-disabled-values.yaml new file mode 100644 index 000000000..769f8eb55 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/05-auth-disabled-values.yaml @@ -0,0 +1,8 @@ +service: + type: NodePort + +authService: + create: false + +deploymentStrategy: + type: Recreate diff --git a/charts/ambassador/ambassador/6.7.1100/ci/06-hpa-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/06-hpa-values.yaml new file mode 100644 index 000000000..56509eb8b --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/06-hpa-values.yaml @@ -0,0 +1,8 @@ +deploymentStrategy: + type: Recreate + +service: + type: NodePort + +autoscaling: + enabled: true diff --git a/charts/ambassador/ambassador/6.7.1100/ci/08-single-namespace-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/08-single-namespace-values.yaml new file mode 100644 index 000000000..591785bde --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/08-single-namespace-values.yaml @@ -0,0 +1,8 @@ +service: + type: NodePort + +deploymentStrategy: + type: Recreate + +scope: + singleNamespace: true diff --git a/charts/ambassador/ambassador/6.7.1100/ci/09-redis-false-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/09-redis-false-values.yaml new file mode 100644 index 000000000..e545210d5 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/09-redis-false-values.yaml @@ -0,0 +1,9 @@ +service: + type: NodePort + +redis: + enabled: false + # Annotations for Ambassador Pro's redis instance. 
+ +deploymentStrategy: + type: Recreate diff --git a/charts/ambassador/ambassador/6.7.1100/ci/12-daemonset-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/12-daemonset-values.yaml new file mode 100644 index 000000000..9a581d94b --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/12-daemonset-values.yaml @@ -0,0 +1,7 @@ +service: + type: NodePort + +deploymentStrategy: + type: RollingUpdate + +daemonSet: true diff --git a/charts/ambassador/ambassador/6.7.1100/ci/13-rl-disabled-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/13-rl-disabled-values.yaml new file mode 100644 index 000000000..a1dfe0434 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/13-rl-disabled-values.yaml @@ -0,0 +1,8 @@ +service: + type: NodePort + +rateLimit: + create: false + +deploymentStrategy: + type: Recreate diff --git a/charts/ambassador/ambassador/6.7.1100/ci/14-deployment-labels.yaml b/charts/ambassador/ambassador/6.7.1100/ci/14-deployment-labels.yaml new file mode 100644 index 000000000..33ebe5b74 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/14-deployment-labels.yaml @@ -0,0 +1,3 @@ +deploymentLabels: + label: foo + label2: bar diff --git a/charts/ambassador/ambassador/6.7.1100/ci/15-test-resolvers.yaml b/charts/ambassador/ambassador/6.7.1100/ci/15-test-resolvers.yaml new file mode 100644 index 000000000..0601ce9c4 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/15-test-resolvers.yaml @@ -0,0 +1,11 @@ +resolvers: + endpoint: + create: true + name: endpoint-foo + + consul: + create: true + name: consul-foo + spec: + address: ${HOST_IP} + datacenter: dc1 \ No newline at end of file diff --git a/charts/ambassador/ambassador/6.7.1100/ci/16-test-module.yaml b/charts/ambassador/ambassador/6.7.1100/ci/16-test-module.yaml new file mode 100644 index 000000000..d80bf9508 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/16-test-module.yaml @@ -0,0 +1,9 @@ +module: + lua_scripts: | + function envoy_on_response(response_handle) + response_handle:headers():add("Lua-Scripts-Enabled", "Processed") + end + + ip_allow: + - peer: 127.0.0.1 + - remote: 99.99.0.0/16 \ No newline at end of file diff --git a/charts/ambassador/ambassador/6.7.1100/ci/17-svc-preview.yaml b/charts/ambassador/ambassador/6.7.1100/ci/17-svc-preview.yaml new file mode 100644 index 000000000..a141bcdda --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/17-svc-preview.yaml @@ -0,0 +1,5 @@ +servicePreview: + enabled: true +trafficAgent: + injector: + enabled: true diff --git a/charts/ambassador/ambassador/6.7.1100/ci/check_updated_changelog.sh b/charts/ambassador/ambassador/6.7.1100/ci/check_updated_changelog.sh new file mode 100644 index 000000000..1840c1799 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/check_updated_changelog.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } +TOP_DIR=$CURR_DIR/.. + +# shellcheck source=common.sh +source "$CURR_DIR/common.sh" + +echo ${TOP_DIR} +chart_version=$(get_chart_version ${TOP_DIR}) + +if ! grep "## v${chart_version}" ${TOP_DIR}/CHANGELOG.md > /dev/null 2>&1 ; then + echo "Current chart version does not appear in the changelog." + echo "Please run ambassador.git/charts/ambassador/ci/update_chart_changelog.sh and commit." + exit 1 +fi + +echo "Changelog looks good!" 
diff --git a/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/backend.yaml b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/backend.yaml new file mode 100644 index 000000000..b2d9205df --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/backend.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: getambassador.io/v1 +kind: Mapping +metadata: + name: quote-backend +spec: + prefix: /backend/ + service: quote +--- +apiVersion: v1 +kind: Service +metadata: + name: quote +spec: + ports: + - name: http + port: 80 + targetPort: 8080 + selector: + app: quote +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: quote +spec: + replicas: 1 + selector: + matchLabels: + app: quote + strategy: + type: RollingUpdate + template: + metadata: + labels: + app: quote + spec: + containers: + - name: backend + image: datawire/quote:0.4.0 + ports: + - name: http + containerPort: 8080 + resources: + limits: + cpu: "0.1" + memory: 100Mi diff --git a/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/ci-default-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/ci-default-values.yaml new file mode 100644 index 000000000..0a1ec852e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/ci-default-values.yaml @@ -0,0 +1,9 @@ +#env: +# AMBASSADOR_SINGLE_NAMESPACE: true +# AMBASSADOR_NO_KUBEWATCH: no_kubewatch + +deploymentStrategy: + type: Recreate + +service: + type: NodePort diff --git a/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm-init.yaml b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm-init.yaml new file mode 100644 index 000000000..1fcf47dca --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm-init.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tiller + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tiller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system diff --git a/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm2-values.yaml b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm2-values.yaml new file mode 100644 index 000000000..d9c0c83c3 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/helm2-values.yaml @@ -0,0 +1,6 @@ +service: + type: NodePort + +crds: + create: false + diff --git a/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/tls.yaml b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/tls.yaml new file mode 100644 index 000000000..bc25cf664 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/tests/manifests/tls.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +data: + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNzRENDQVpnQ0NRRHVzSjFYNE54NjJEQU5CZ2txaGtpRzl3MEJBUXNGQURBYU1SZ3dGZ1lEVlFRRERBOWgKYldKaGMzTmhaRzl5TFdObGNuUXdIaGNOTVRreE1qRXpNakV3TXpBNVdoY05NakF4TWpFeU1qRXdNekE1V2pBYQpNUmd3RmdZRFZRUUREQTloYldKaGMzTmhaRzl5TFdObGNuUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQzJjcms1OTk0dklURnZpUGNJRjJpd3R0dVdpajFTeUZNSzVpbERieFByMmJCL3RmbDYKcmRwVUFlWkkrMTVDR2VHbi80ZitwRlFXODdwZ2ZvbDhlL3lCSTUvWStpdVIrQUY1bzhQV2h4aHBJdVk3RXdVbgpyT3ZJajcxaUZWa1Q4akRYZW9RWWdKalQ1MWh4SisyelVLZ3VtZDB6L05USEwrQndFbHZ4Z1ZuWlhUdlhsVGFiClBoWloyK3dZdDQvSnozN3lBMHJwNURKeTg5SStQNmVRSmNseUVyWmsvRUNuRFBxTlhDK1VyclVySXBsNkRScHUKdGZWOE9KM3BJZWc2YjBWQi9TQnRPNzFhMThkaXFPclFVREU5MEFOSWJsYWp0ODN5M0FIc29SNytpVGI0QXFvVwpiNG5LcVV1bEQ2QU0xVUg0TzR0SExIM05SemVOL1E1ZUtVb0ZBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFECmdnRUJBQ045b050OVJXT2JOTTBmajg1cm9GUG1zRE1UWElFOU5SNmpDMjV1SGtpY0lnamtGd043NTFkTnQxT0YKZWZLSkFzTDlSWmZUNmVmMUMxOFlnWE1xbTF5Yis0Q1VWWU9RZW96MlgweEdyT1lLZUhPM0hqamNqcXZ1cUxTeApTQ2duVlM1NkhqZU15MzJBNnIxcUhBL1FsYkkraGJFbHN0MVNwYnFSOG95dE9oUzZpNFNWbWxacWxBRkx5WFRRCjZ6Nm5wc25lTmdXMXhkdDN2UkpleXVFWEFZL0Z0eUxmZnkxdk5uODhhTkg2Y2Z2eWJjaHNkaWlRNnVXTnowVGMKeVVodGZUYWFRVjA1d21KZEJ3ZmJndXF0UjFxbXdyNCtzcjA0MEhoQ0pSVmlRUUdHa2VWSVU5ZHFPY0ZkZk5FTQo5NmczU01YWGRrcVhBYzFFb2hZdEthMWwvNTA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdG5LNU9mZmVMeUV4YjRqM0NCZG9zTGJibG9vOVVzaFRDdVlwUTI4VDY5bXdmN1g1CmVxM2FWQUhtU1B0ZVFobmhwLytIL3FSVUZ2TzZZSDZKZkh2OGdTT2YyUG9ya2ZnQmVhUEQxb2NZYVNMbU94TUYKSjZ6cnlJKzlZaFZaRS9JdzEzcUVHSUNZMCtkWWNTZnRzMUNvTHBuZE0velV4eS9nY0JKYjhZRloyVjA3MTVVMgptejRXV2R2c0dMZVB5YzkrOGdOSzZlUXljdlBTUGorbmtDWEpjaEsyWlB4QXB3ejZqVnd2bEs2MUt5S1plZzBhCmJyWDFmRGlkNlNIb09tOUZRZjBnYlR1OVd0ZkhZcWpxMEZBeFBkQURTRzVXbzdmTjh0d0I3S0VlL29rMitBS3EKRm0rSnlxbExwUStnRE5WQitEdUxSeXg5elVjM2pmME9YaWxLQlFJREFRQUJBb0lCQUVOQy9qaDV3Z2E4QlA2cQpqdkFEdVV2VXpoV3N0empxczNyRUtaZzd2aXRvSU9La1V1cEFaOG9xdlJ4UTE0b2xBb1V0OXBRUlB4TUxIYjN2ClNINkZNeXprMWt4bXhtTlUvQzQ5Q3Jqdkt6ZXZieE4rU3BzNjY5NFA1L0RlRCs0RGpyQVI4ZHNhcGIwUmdCQ1AKZU5sdnRlRWdSbVdoSTB5ZndPMXdSMGM4dWNRaE5GcjNNd0lMQ1FES0Zpa2NDSi9GV0FmNXc4ZGFnYnBYTXAxawo3ci9ya1BFcVh6NnRxam04eWZZWlRoaGIwUE5LcSsxOGdkaCtLeTZPL1RnTVZ2d1BLVkIrZUhoQmJZY2R6VGYxCmxia3pVeUFhZmR3VlBTTnhVOWhzSzBqNExWZG83YlVkajZySHNXTlBWcm1Ib1VsUnNjcno2aDhPZlQ0bU9WTi8KRmhtcEhvRUNnWUVBNlVENVlWMUJrWEg4S21WRjhiVGVGVGlTY1IvRGI4TVlRY3NLNzQrNEg5aEFmbjR3d3ZWeQpidi9kL2NsOWZHY0xXN0w1QWM1WWRxNlNWZGFHRVZpVzVOTy8xV0wwN3JjaG8xZTRaSzlPemJiUllNWW51cWRHCmF5eGhoUks0R25ubzZXMTlMWWc1d1F3TUJvUzNSbFVxUWN3UU1ESiszMVc4emwrazdpODk0dVVDZ1lFQXlEMXIKZHVMSGRMcG9UWEd2ZjZIVk9HQ1pWczQvSXg1M1B1WnRXY1FkYkc5MDZNWHRwdldWdDN1ek8rVVd2WGJIWHBSMQpjZWVrUHRucTI4a1BFT2oyd3NoVFRQQ05OT0dUWE01SzREbTVjNjVaeWY0WVJjQ0NZNEpSSUNqWHExeE9uc21nCk8ydTZiYlVQWE9veXFmVllWK0wwK25zcHNLOUNCd0ZaMjJqMXVLRUNnWUVBbzcyZTBzQ2FaTFcxcFRWT3NteWIKY2g0eWZ3TWpPUE9sdFpvSlpUNW9yTUlzRkNBVnJ1YUtuRzAxc3hDYzdKV1JuWiszdVpMVyt3bDFaSmlocU0rZApyYWtRQTRYaUZ5bXJqWFRvMXBWU0pvcnQxSmVHRUR1WTdXZE1WaFJiOVFvYmZMSUZxODd6YkJjKzRkeU1vK3pwCkt5TkxRZXBRc2dzSDdYK3EwaUdMdWhrQ2dZQUlYQWdRZm9jMUtGTVNhSnliQjNhUFUva1MxcWxzSGVsOGhzSXAKN1RZTlFObnduZEsrRmFLYWRsK1ZNSXN5ZmJMMUQ5MlhVOFJYbTJGaXE1SWxjcFJhcldKTTQvNEJKeW12eGl6NgpEMjdlbFhqS0pnRjlaL3dKaTNjM2tIendlbm9OeHYwWmZmWGFmcVNWakhGeEJ2MFpMakJzQkpoSStBZ1pvc1ROCmxDUXVBUUtCZ1FESHVxNUVseU1RS2NZWm5tRlN6T2ZYWXNJYm8rZEJJTlEyNnB0OFdacGMydnpsNkNrQXV3TWwKQU9jRllrbjBXSnVJRXRubnhPT3Rwcnh0VGRIWGIvOWZWY090Unp2TitvVjN2OVNEalZjWTRESWp3MXlpMkt1Vwp6MmV1N1lCNExlbG13TFlHMEJUMWp0ejJJREUxYW85MzgybEpWV2J4Y1dsdHArWTFCRWhkdmc9PQotLS0tLUVORCBSU0EgUFJ
JVkFURSBLRVktLS0tLQo= +kind: Secret +metadata: + name: self-signed-cert +type: kubernetes.io/tls +--- +apiVersion: getambassador.io/v1 +kind: TLSContext +metadata: + name: tls +spec: + hosts: ["*"] + secret: self-signed-cert + diff --git a/charts/ambassador/ambassador/6.7.1100/ci/update_chart_changelog.sh b/charts/ambassador/ambassador/6.7.1100/ci/update_chart_changelog.sh new file mode 100644 index 000000000..ee7bfeaa8 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ci/update_chart_changelog.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e + +CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; } +TOP_DIR=$CURR_DIR/.. + +# shellcheck source=common.sh +source "$CURR_DIR/common.sh" + +chart_version=$(get_chart_version ${TOP_DIR}) + +new_changelog=${TOP_DIR}/CHANGELOG.new.md +rm ${new_changelog} || true +while IFS= read -r line ; do + echo -e "${line}" + echo -e "${line}" >> ${new_changelog} + if [[ "${line}" =~ "## Next Release" ]] ; then + echo "" >> ${new_changelog} + echo "(no changes yet)" >> ${new_changelog} + echo "" >> ${new_changelog} + echo "## v${chart_version}" >> ${new_changelog} + fi + +done < ${TOP_DIR}/CHANGELOG.md + +mv ${new_changelog} ${TOP_DIR}/CHANGELOG.md +if [[ -n "${DONT_COMMIT_DIFF}" ]] ; then + echo "DONT_COMMIT_DIFF is set, not committing" + exit 0 +fi + +if git diff --exit-code -- ${TOP_DIR}/CHANGELOG.md > /dev/null 2>&1 ; then + echo "No changes to changelog, exiting" + exit 0 +fi + +branch_name="$(git symbolic-ref HEAD 2>/dev/null)" || +branch_name="detached" + +if [[ "${branch_name}" == "refs/heads/master" ]] ; then + echo "Not committing local changes to branch because branch is master" + exit 1 +elif [[ "${branch_name}" == "detached" ]] ; then + echo "Not committing local changes because you're in a detached head state" + echo "please create a branch then rerun this script" + exit 1 +fi +branch_name=${branch_name##refs/heads/} +git add ${TOP_DIR}/CHANGELOG.md +git commit -m "Committing changelog for chart v${chart_version}" +git push -u origin ${branch_name} diff --git a/charts/ambassador/ambassador/6.7.1100/crds/filter.yaml b/charts/ambassador/ambassador/6.7.1100/crds/filter.yaml new file mode 100644 index 000000000..bf0403820 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/filter.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: filters.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: Filter + plural: filters + shortNames: + - fil + singular: filter + scope: Namespaced + versions: + - name: v1beta2 + served: true + storage: false + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/filterpolicy.yaml b/charts/ambassador/ambassador/6.7.1100/crds/filterpolicy.yaml new file mode 100644 index 000000000..88e3781da --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/filterpolicy.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: filterpolicies.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: FilterPolicy + plural: filterpolicies + shortNames: + 
- fp + singular: filterpolicy + scope: Namespaced + versions: + - name: v1beta2 + served: true + storage: false + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_authservices.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_authservices.yaml new file mode 100644 index 000000000..1c97d063f --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_authservices.yaml @@ -0,0 +1,115 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: authservices.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: AuthService + listKind: AuthServiceList + plural: authservices + singular: authservice + scope: Namespaced + validation: + openAPIV3Schema: + description: AuthService is the Schema for the authservices API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AuthServiceSpec defines the desired state of AuthService + properties: + add_auth_headers: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + type: object + add_linkerd_headers: + type: boolean + allow_request_body: + type: boolean + allowed_authorization_headers: + items: + type: string + type: array + allowed_request_headers: + items: + type: string + type: array + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + auth_service: + type: string + failure_mode_allow: + type: boolean + include_body: + properties: + allow_partial: + type: boolean + max_bytes: + description: These aren't pointer types because they are required. + type: integer + required: + - allow_partial + - max_bytes + type: object + path_prefix: + type: string + proto: + enum: + - http + - grpc + type: string + protocol_version: + enum: + - v2 + - v3 + type: string + status_on_error: + description: Why isn't this just an int?? + properties: + code: + type: integer + type: object + timeout_ms: + type: integer + tls: + description: BoolOrString is a type that can hold a Boolean or a string. 
+ oneOf: + - type: string + - type: boolean + required: + - auth_service + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_consulresolvers.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_consulresolvers.yaml new file mode 100644 index 000000000..0f659d5d3 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_consulresolvers.yaml @@ -0,0 +1,58 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: consulresolvers.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: ConsulResolver + listKind: ConsulResolverList + plural: consulresolvers + singular: consulresolver + scope: Namespaced + validation: + openAPIV3Schema: + description: ConsulResolver is the Schema for the ConsulResolver API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsulResolver tells Ambassador to use Consul to resolve services. In addition to the AmbassadorID, it needs information about which Consul server and DC to use. + properties: + address: + type: string + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + datacenter: + type: string + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_devportals.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_devportals.yaml new file mode 100644 index 000000000..291d2a66e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_devportals.yaml @@ -0,0 +1,109 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: devportals.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: DevPortal + listKind: DevPortalList + plural: devportals + singular: devportal + scope: Namespaced + validation: + openAPIV3Schema: + description: "DevPortal is the Schema for the DevPortals API \n DevPortal resources specify the `what` and `how` is shown in a DevPortal: \n * `what` is in a DevPortal can be controlled with - a `selector`, that can be used for filtering `Mappings`. - a `docs` listing of (services, url) * `how` is a pointer to some `contents` (a checkout of a Git repository with go-templates/markdown/css). \n Multiple `DevPortal`s can exist in the cluster, and the Dev Portal server will show them at different endpoints. A `DevPortal` resource with a special name, `ambassador`, will be used for configuring the default Dev Portal (served at `/docs/` by default)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DevPortalSpec defines the desired state of DevPortal + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + content: + description: Content specifies where the content shown in the DevPortal come from + properties: + branch: + type: string + dir: + type: string + url: + type: string + type: object + default: + description: Default must be true when this is the default DevPortal + type: boolean + docs: + description: Docs is a static docs definition + items: + description: 'DevPortalDocsSpec is a static documentation definition: instead of using a Selector for finding documentation for services, users can provide a static list of : tuples. These services will be shown in the Dev Portal with the documentation obtained from this URL.' + properties: + service: + description: Service is the service being documented + type: string + url: + description: URL is the URL used for obtaining docs + type: string + type: object + type: array + naming_scheme: + description: Describes how to display "services" in the DevPortal. Default namespace.name + enum: + - namespace.name + - name.prefix + type: string + search: + description: DevPortalSearchSpec allows configuration over search functionality for the DevPortal + properties: + enabled: + type: boolean + type: + description: 'Type of search. 
"title-only" does a fuzzy search over openapi and page titles "all-content" will fuzzy search over all openapi and page content. "title-only" is the default. warning: using all-content may incur a larger memory footprint' + enum: + - title-only + - all-content + type: string + type: object + selector: + description: Selector is used for choosing what is shown in the DevPortal + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels specifies the list of labels that must be present in Mappings for being present in this DevPortal. + type: object + matchNamespaces: + description: MatchNamespaces is a list of namespaces that will be included in this DevPortal. + items: + type: string + type: array + type: object + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_hosts.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_hosts.yaml new file mode 100644 index 000000000..ccc7abd92 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_hosts.yaml @@ -0,0 +1,246 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: hosts.getambassador.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.hostname + name: Hostname + type: string + - JSONPath: .status.state + name: State + type: string + - JSONPath: .status.phaseCompleted + name: Phase Completed + type: string + - JSONPath: .status.phasePending + name: Phase Pending + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: getambassador.io + names: + categories: + - ambassador-crds + kind: Host + listKind: HostList + plural: hosts + singular: host + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Host is the Schema for the hosts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HostSpec defines the desired state of Host + properties: + acmeProvider: + description: Specifies whether/who to talk ACME with to automatically manage the $tlsSecret. + properties: + authority: + description: Specifies who to talk ACME with to get certs. Defaults to Let's Encrypt; if "none" (case-insensitive), do not try to do ACME for this Host. + type: string + email: + type: string + privateKeySecret: + description: "Specifies the Kubernetes Secret to use to store the private key of the ACME account (essentially, where to store the auto-generated password for the auto-created ACME account). 
You should not normally need to set this--the default value is based on a combination of the ACME authority being registered wit and the email address associated with the account. \n Note that this is a native-Kubernetes-style core.v1.LocalObjectReference, not an Ambassador-style `{name}.{namespace}` string. Because we're opinionated, it does not support referencing a Secret in another namespace (because most native Kubernetes resources don't support that), but if we ever abandon that opinion and decide to support non-local references it, it would be by adding a `namespace:` field by changing it from a core.v1.LocalObjectReference to a core.v1.SecretReference, not by adopting the `{name}.{namespace}` notation." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + registration: + description: This is normally set automatically + type: string + type: object + ambassador_id: + description: Common to all Ambassador objects (and optional). + items: + type: string + oneOf: + - type: string + - type: array + ambassadorId: + description: A compatibility alias for "ambassador_id"; because Host used to be specified with protobuf, and jsonpb allowed either "ambassador_id" or "ambassadorId", and even though we didn't tell people about "ambassadorId" it's what the web policy console generated because of jsonpb. So Hosts with 'ambassadorId' exist in the wild. + items: + type: string + oneOf: + - type: string + - type: array + hostname: + description: Hostname by which the Ambassador can be reached. + type: string + previewUrl: + description: Configuration for the Preview URL feature of Service Preview. Defaults to preview URLs not enabled. + properties: + enabled: + description: Is the Preview URL feature enabled? + type: boolean + type: + description: What type of Preview URL is allowed? + enum: + - Path + type: string + type: object + requestPolicy: + description: Request policy definition. + properties: + insecure: + properties: + action: + enum: + - Redirect + - Reject + - Route + type: string + additionalPort: + type: integer + type: object + type: object + selector: + description: Selector by which we can find further configuration. Defaults to hostname=$hostname + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + tls: + description: TLS configuration. It is not valid to specify both `tlsContext` and `tls`. + properties: + alpn_protocols: + type: string + ca_secret: + type: string + cacert_chain_file: + type: string + cert_chain_file: + type: string + cert_required: + type: boolean + cipher_suites: + items: + type: string + type: array + ecdh_curves: + items: + type: string + type: array + max_tls_version: + type: string + min_tls_version: + type: string + private_key_file: + type: string + redirect_cleartext_from: + type: integer + sni: + type: string + type: object + tlsContext: + description: "Name of the TLSContext the Host resource is linked with. It is not valid to specify both `tlsContext` and `tls`. \n Note that this is a native-Kubernetes-style core.v1.LocalObjectReference, not an Ambassador-style `{name}.{namespace}` string. Because we're opinionated, it does not support referencing a Secret in another namespace (because most native Kubernetes resources don't support that), but if we ever abandon that opinion and decide to support non-local references it, it would be by adding a `namespace:` field by changing it from a core.v1.LocalObjectReference to a core.v1.SecretReference, not by adopting the `{name}.{namespace}` notation." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + tlsSecret: + description: "Name of the Kubernetes secret into which to save generated certificates. If ACME is enabled (see $acmeProvider), then the default is $hostname; otherwise the default is \"\". If the value is \"\", then we do not do TLS for this Host. \n Note that this is a native-Kubernetes-style core.v1.LocalObjectReference, not an Ambassador-style `{name}.{namespace}` string. Because we're opinionated, it does not support referencing a Secret in another namespace (because most native Kubernetes resources don't support that), but if we ever abandon that opinion and decide to support non-local references it, it would be by adding a `namespace:` field by changing it from a core.v1.LocalObjectReference to a core.v1.SecretReference, not by adopting the `{name}.{namespace}` notation." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: object + status: + description: HostStatus defines the observed state of Host + properties: + errorBackoff: + type: string + errorReason: + description: errorReason, errorTimestamp, and errorBackoff are valid when state==Error. + type: string + errorTimestamp: + format: date-time + type: string + phaseCompleted: + description: phaseCompleted and phasePending are valid when state==Pending or state==Error. + enum: + - NA + - DefaultsFilled + - ACMEUserPrivateKeyCreated + - ACMEUserRegistered + - ACMECertificateChallenge + type: string + phasePending: + description: phaseCompleted and phasePending are valid when state==Pending or state==Error. 
+ enum: + - NA + - DefaultsFilled + - ACMEUserPrivateKeyCreated + - ACMEUserRegistered + - ACMECertificateChallenge + type: string + state: + description: The first value listed in the Enum marker becomes the "zero" value, and it would be great if "Pending" could be the default value; but it's Important that the "zero" value be able to be shown as empty/omitted from display, and we really do want `kubectl get hosts` to say "Pending" in the "STATE" column, and not leave the column empty. + enum: + - Initial + - Pending + - Ready + - Error + type: string + tlsCertificateSource: + enum: + - Unknown + - None + - Other + - ACME + type: string + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesendpointresolvers.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesendpointresolvers.yaml new file mode 100644 index 000000000..88b73d2d8 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesendpointresolvers.yaml @@ -0,0 +1,54 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: kubernetesendpointresolvers.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: KubernetesEndpointResolver + listKind: KubernetesEndpointResolverList + plural: kubernetesendpointresolvers + singular: kubernetesendpointresolver + scope: Namespaced + validation: + openAPIV3Schema: + description: KubernetesEndpointResolver is the Schema for the kubernetesendpointresolver API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubernetesEndpointResolver tells Ambassador to use Kubernetes Endpoints resources to resolve services. It actually has no spec other than the AmbassadorID. + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. 
If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesserviceresolvers.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesserviceresolvers.yaml new file mode 100644 index 000000000..98b5d302e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_kubernetesserviceresolvers.yaml @@ -0,0 +1,54 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: kubernetesserviceresolvers.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: KubernetesServiceResolver + listKind: KubernetesServiceResolverList + plural: kubernetesserviceresolvers + singular: kubernetesserviceresolver + scope: Namespaced + validation: + openAPIV3Schema: + description: KubernetesServiceResolver is the Schema for the kubernetesserviceresolver API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KubernetesServiceResolver tells Ambassador to use Kubernetes Service resources to resolve services. It actually has no spec other than the AmbassadorID. + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_logservices.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_logservices.yaml new file mode 100644 index 000000000..a726defe5 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_logservices.yaml @@ -0,0 +1,83 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: logservices.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: LogService + listKind: LogServiceList + plural: logservices + singular: logservice + scope: Namespaced + validation: + openAPIV3Schema: + description: LogService is the Schema for the logservices API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogServiceSpec defines the desired state of LogService + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + driver: + enum: + - tcp + - http + type: string + driver_config: + properties: + additional_log_headers: + items: + properties: + during_request: + type: boolean + during_response: + type: boolean + during_trailer: + type: boolean + header_name: + type: string + type: object + type: array + type: object + flush_interval_byte_size: + type: integer + flush_interval_time: + type: integer + grpc: + type: boolean + service: + type: string + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_mappings.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_mappings.yaml new file mode 100644 index 000000000..c61eefb5f --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_mappings.yaml @@ -0,0 +1,431 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: mappings.getambassador.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.host + name: Source Host + type: string + - JSONPath: .spec.prefix + name: Source Prefix + type: string + - JSONPath: .spec.service + name: Dest Service + type: string + - JSONPath: .status.state + name: State + type: string + - JSONPath: .status.reason + name: Reason + type: string + group: getambassador.io + names: + categories: + - ambassador-crds + kind: Mapping + listKind: MappingList + plural: mappings + singular: mapping + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Mapping is the Schema for the mappings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MappingSpec defines the desired state of Mapping + properties: + add_linkerd_headers: + type: boolean + add_request_headers: + additionalProperties: + oneOf: + - type: string + - type: boolean + - type: object + type: object + add_response_headers: + additionalProperties: + oneOf: + - type: string + - type: boolean + - type: object + type: object + allow_upgrade: + description: "A case-insensitive list of the non-HTTP protocols to allow \"upgrading\" to from HTTP via the \"Connection: upgrade\" mechanism[1]. After the upgrade, Ambassador does not interpret the traffic, and behaves similarly to how it does for TCPMappings. \n [1]: https://tools.ietf.org/html/rfc7230#section-6.7 \n For example, if your upstream service supports WebSockets, you would write \n allow_upgrade: - websocket \n Or if your upstream service supports upgrading from HTTP to SPDY (as the Kubernetes apiserver does for `kubectl exec` functionality), you would write \n allow_upgrade: - spdy/3.1" + items: + type: string + type: array + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + auth_context_extensions: + additionalProperties: + type: string + type: object + auto_host_rewrite: + type: boolean + bypass_auth: + type: boolean + bypass_error_response_overrides: + description: If true, bypasses any `error_response_overrides` set on the Ambassador module. 
+ type: boolean + case_sensitive: + type: boolean + circuit_breakers: + items: + properties: + max_connections: + type: integer + max_pending_requests: + type: integer + max_requests: + type: integer + max_retries: + type: integer + priority: + enum: + - default + - high + type: string + type: object + type: array + cluster_idle_timeout_ms: + type: integer + cluster_max_connection_lifetime_ms: + type: integer + cluster_tag: + type: string + connect_timeout_ms: + type: integer + cors: + properties: + credentials: + type: boolean + exposed_headers: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + headers: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + max_age: + type: string + methods: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + origins: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + type: object + docs: + description: DocsInfo provides some extra information about the docs for the Mapping (used by the Dev Portal) + properties: + display_name: + type: string + ignored: + type: boolean + path: + type: string + url: + type: string + type: object + enable_ipv4: + type: boolean + enable_ipv6: + type: boolean + envoy_override: + description: UntypedDict is relatively opaque as a Go type, but it preserves its contents in a roundtrippable way. + type: object + error_response_overrides: + description: Error response overrides for this Mapping. Replaces all of the `error_response_overrides` set on the Ambassador module, if any. + items: + description: A response rewrite for an HTTP error response + properties: + body: + description: The new response body + properties: + content_type: + description: The content type to set on the error response body when using text_format or text_format_source. Defaults to 'text/plain'. + type: string + json_format: + additionalProperties: + type: string + description: 'A JSON response with content-type: application/json. The values can contain format text like in text_format.' + type: object + text_format: + description: A format string representing a text response body. Content-Type can be set using the `content_type` field below. + type: string + text_format_source: + description: A format string sourced from a file on the Ambassador container. Useful for larger response bodies that should not be placed inline in configuration. + properties: + filename: + description: The name of a file on the Ambassador pod that contains a format text string. + type: string + type: object + type: object + on_status_code: + description: The status code to match on -- not a pointer because it's required. + maximum: 599 + minimum: 400 + type: integer + required: + - body + - on_status_code + type: object + minItems: 1 + type: array + grpc: + type: boolean + headers: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. 
+ oneOf: + - type: string + - type: boolean + type: object + host: + type: string + host_redirect: + type: boolean + host_regex: + type: boolean + host_rewrite: + type: string + idle_timeout_ms: + type: integer + keepalive: + properties: + idle_time: + type: integer + interval: + type: integer + probes: + type: integer + type: object + labels: + additionalProperties: + description: A MappingLabelGroupsArray is an array of MappingLabelGroups. I know, complex. + items: + additionalProperties: + description: 'A MappingLabelsArray is the value in the MappingLabelGroup: an array of label specifiers.' + items: + description: A MappingLabelSpecifier (finally!) defines a single label. There are multiple kinds of label, so this is more complex than we'd like it to be. See the remarks about schema on custom types in `./common.go`. + type: array + description: 'A MappingLabelGroup is a single element of a MappingLabelGroupsArray: a second map, where the key is a human-readable name that identifies the group.' + type: object + type: array + description: A DomainMap is the overall Mapping.spec.Labels type. It maps domains (kind of like namespaces for Mapping labels) to arrays of label groups. + type: object + load_balancer: + properties: + cookie: + properties: + name: + type: string + path: + type: string + ttl: + type: string + required: + - name + type: object + header: + type: string + policy: + enum: + - round_robin + - ring_hash + - maglev + - least_request + type: string + source_ip: + type: boolean + required: + - policy + type: object + method: + type: string + method_regex: + type: boolean + modules: + items: + description: UntypedDict is relatively opaque as a Go type, but it preserves its contents in a roundtrippable way. + type: object + type: array + outlier_detection: + type: string + path_redirect: + description: Path replacement to use when generating an HTTP redirect. Used with `host_redirect`. + type: string + precedence: + type: integer + prefix: + type: string + prefix_exact: + type: boolean + prefix_redirect: + description: Prefix rewrite to use when generating an HTTP redirect. Used with `host_redirect`. + type: string + prefix_regex: + type: boolean + priority: + type: string + query_parameters: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + type: object + redirect_response_code: + description: The response code to use when generating an HTTP redirect. Defaults to 301. Used with `host_redirect`. + enum: + - 301 + - 302 + - 303 + - 307 + - 308 + type: integer + regex_headers: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + type: object + regex_query_parameters: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + type: object + regex_redirect: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + description: Prefix regex rewrite to use when generating an HTTP redirect. Used with `host_redirect`. + type: object + regex_rewrite: + additionalProperties: + description: BoolOrString is a type that can hold a Boolean or a string. 
+ oneOf: + - type: string + - type: boolean + type: object + remove_request_headers: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + remove_response_headers: + description: StringOrStringList is just what it says on the tin, but note that it will always marshal as a list of strings right now. + items: + type: string + oneOf: + - type: string + - type: array + resolver: + type: string + retry_policy: + properties: + num_retries: + type: integer + per_try_timeout: + type: string + retry_on: + enum: + - 5xx + - gateway-error + - connect-failure + - retriable-4xx + - refused-stream + - retriable-status-codes + type: string + type: object + rewrite: + type: string + service: + type: string + shadow: + type: boolean + timeout_ms: + description: The timeout for requests that use this Mapping. Overrides `cluster_request_timeout_ms` set on the Ambassador Module, if it exists. + type: integer + tls: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + use_websocket: + description: 'use_websocket is deprecated, and is equivlaent to setting `allow_upgrade: ["websocket"]`' + type: boolean + weight: + type: integer + required: + - prefix + - service + type: object + status: + description: MappingStatus defines the observed state of Mapping + properties: + reason: + type: string + state: + enum: + - "" + - Inactive + - Running + type: string + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_modules.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_modules.yaml new file mode 100644 index 000000000..f60fa89c6 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_modules.yaml @@ -0,0 +1,56 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: modules.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: Module + listKind: ModuleList + plural: modules + singular: module + scope: Namespaced + validation: + openAPIV3Schema: + description: "A Module defines system-wide configuration. The type of module is controlled by the .metadata.name; valid names are \"ambassador\" or \"tls\". \n https://www.getambassador.io/docs/edge-stack/latest/topics/running/ambassador/#the-ambassador-module https://www.getambassador.io/docs/edge-stack/latest/topics/running/tls/#tls-module-deprecated" + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + config: + description: UntypedDict is relatively opaque as a Go type, but it preserves its contents in a roundtrippable way. + type: object + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_ratelimitservices.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_ratelimitservices.yaml new file mode 100644 index 000000000..97562444f --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_ratelimitservices.yaml @@ -0,0 +1,72 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: ratelimitservices.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: RateLimitService + listKind: RateLimitServiceList + plural: ratelimitservices + singular: ratelimitservice + scope: Namespaced + validation: + openAPIV3Schema: + description: RateLimitService is the Schema for the ratelimitservices API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RateLimitServiceSpec defines the desired state of RateLimitService + properties: + ambassador_id: + description: Common to all Ambassador objects. + items: + type: string + oneOf: + - type: string + - type: array + domain: + type: string + protocol_version: + enum: + - v2 + - v3 + type: string + service: + type: string + timeout_ms: + type: integer + tls: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + required: + - service + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tcpmappings.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tcpmappings.yaml new file mode 100644 index 000000000..f4c295245 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tcpmappings.yaml @@ -0,0 +1,102 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: tcpmappings.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: TCPMapping + listKind: TCPMappingList + plural: tcpmappings + singular: tcpmapping + scope: Namespaced + validation: + openAPIV3Schema: + description: TCPMapping is the Schema for the tcpmappings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPMappingSpec defines the desired state of TCPMapping + properties: + address: + type: string + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + circuit_breakers: + items: + properties: + max_connections: + type: integer + max_pending_requests: + type: integer + max_requests: + type: integer + max_retries: + type: integer + priority: + enum: + - default + - high + type: string + type: object + type: array + cluster_tag: + type: string + enable_ipv4: + type: boolean + enable_ipv6: + type: boolean + host: + type: string + idle_timeout_ms: + description: 'FIXME(lukeshu): Surely this should be an ''int''?' + type: string + port: + description: Port isn't a pointer because it's required. + type: integer + resolver: + type: string + service: + type: string + tls: + description: BoolOrString is a type that can hold a Boolean or a string. + oneOf: + - type: string + - type: boolean + weight: + type: integer + required: + - port + - service + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tlscontexts.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tlscontexts.yaml new file mode 100644 index 000000000..c7ff23605 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tlscontexts.yaml @@ -0,0 +1,100 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: tlscontexts.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: TLSContext + listKind: TLSContextList + plural: tlscontexts + singular: tlscontext + scope: Namespaced + validation: + openAPIV3Schema: + description: TLSContext is the Schema for the tlscontexts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TLSContextSpec defines the desired state of TLSContext + properties: + alpn_protocols: + type: string + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + ca_secret: + type: string + cacert_chain_file: + type: string + cert_chain_file: + type: string + cert_required: + type: boolean + cipher_suites: + items: + type: string + type: array + ecdh_curves: + items: + type: string + type: array + hosts: + items: + type: string + type: array + max_tls_version: + enum: + - v1.0 + - v1.1 + - v1.2 + - v1.3 + type: string + min_tls_version: + enum: + - v1.0 + - v1.1 + - v1.2 + - v1.3 + type: string + private_key_file: + type: string + redirect_cleartext_from: + type: integer + secret: + type: string + secret_namespacing: + type: boolean + sni: + type: string + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tracingservices.yaml b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tracingservices.yaml new file mode 100644 index 000000000..a106a22a5 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/getambassador.io_tracingservices.yaml @@ -0,0 +1,101 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: tracingservices.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: TracingService + listKind: TracingServiceList + plural: tracingservices + singular: tracingservice + scope: Namespaced + validation: + openAPIV3Schema: + description: TracingService is the Schema for the tracingservices API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TracingServiceSpec defines the desired state of TracingService + properties: + ambassador_id: + description: "AmbassadorID declares which Ambassador instances should pay attention to this resource. May either be a string or a list of strings. If no value is provided, the default is: \n ambassador_id: - \"default\"" + items: + type: string + oneOf: + - type: string + - type: array + config: + properties: + access_token_file: + type: string + collector_cluster: + type: string + collector_endpoint: + type: string + collector_endpoint_version: + enum: + - HTTP_JSON_V1 + - HTTP_JSON + - HTTP_PROTO + type: string + collector_hostname: + type: string + service_name: + type: string + shared_span_context: + type: boolean + trace_id_128bit: + type: boolean + type: object + driver: + enum: + - lightstep + - zipkin + - datadog + type: string + sampling: + properties: + client: + type: integer + overall: + type: integer + random: + type: integer + type: object + service: + type: string + tag_headers: + items: + type: string + type: array + required: + - driver + - service + type: object + type: object + version: null + versions: + - name: v2 + served: true + storage: true + - name: v1 + served: true + storage: false diff --git a/charts/ambassador/ambassador/6.7.1100/crds/project.yaml b/charts/ambassador/ambassador/6.7.1100/crds/project.yaml new file mode 100644 index 000000000..ac3a22e27 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/project.yaml @@ -0,0 +1,34 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: projects.getambassador.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.prefix + name: Prefix + type: string + - JSONPath: .spec.githubRepo + name: Repo + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: getambassador.io + names: + categories: + - ambassador-crds + kind: Project + plural: projects + singular: project + scope: Namespaced + subresources: + status: {} + versions: + - name: v2 + served: true + storage: true diff --git 
a/charts/ambassador/ambassador/6.7.1100/crds/projectcontroller.yaml b/charts/ambassador/ambassador/6.7.1100/crds/projectcontroller.yaml new file mode 100644 index 000000000..93bf4ea65 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/projectcontroller.yaml @@ -0,0 +1,24 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: projectcontrollers.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: ProjectController + plural: projectcontrollers + singular: projectcontroller + scope: Namespaced + subresources: + status: {} + versions: + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/projectrevision.yaml b/charts/ambassador/ambassador/6.7.1100/crds/projectrevision.yaml new file mode 100644 index 000000000..6457bef4a --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/projectrevision.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: projectrevisions.getambassador.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.project.name + name: Project + type: string + - JSONPath: .spec.ref + name: Ref + type: string + - JSONPath: .spec.rev + name: Rev + type: string + - JSONPath: .status.phase + name: Status + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: getambassador.io + names: + categories: + - ambassador-crds + kind: ProjectRevision + plural: projectrevisions + singular: projectrevision + scope: Namespaced + subresources: + status: {} + versions: + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/crds/ratelimit.yaml b/charts/ambassador/ambassador/6.7.1100/crds/ratelimit.yaml new file mode 100644 index 000000000..30b85101c --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/crds/ratelimit.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + helm.sh/hook: crd-install + labels: + app.kubernetes.io/name: ambassador + product: aes + name: ratelimits.getambassador.io +spec: + group: getambassador.io + names: + categories: + - ambassador-crds + kind: RateLimit + plural: ratelimits + shortNames: + - rl + singular: ratelimit + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: false + - name: v2 + served: true + storage: true diff --git a/charts/ambassador/ambassador/6.7.1100/ct.yaml b/charts/ambassador/ambassador/6.7.1100/ct.yaml new file mode 100644 index 000000000..ee4f605c8 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/ct.yaml @@ -0,0 +1,37 @@ +# See https://github.com/helm/chart-testing + +# note: all the values files in ci/*-values.yaml will +# be tested automatically. For each configuration, +# all the tests in templates/tests/*.yaml +# will be checked. 
+ +################################################ +# github +################################################ + +remote: origin + +################################################ +# chart +################################################ + +charts: + - /charts/ +chart-dirs: + - /charts/ +chart-repos: + - datawire=https://getambassador.io + +helm-extra-args: --timeout 600s + +# namespace: ambassador +# release-label: release + +################################################ +# checks and validations +################################################ + +validate-maintainers: false +validate-chart-schema: true +validate-yaml: true +# check-version-increment: true diff --git a/charts/ambassador/ambassador/6.7.1100/questions.yml b/charts/ambassador/ambassador/6.7.1100/questions.yml new file mode 100644 index 000000000..bf13b9471 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/questions.yml @@ -0,0 +1,84 @@ +questions: +### CRD Management +- variable: crds.enabled + label: Create CRDs + description: "Should Ambassador Edge Stack create and manage its CRDs?" + type: boolean + required: false + default: "true" + group: "CRD Management" +- variable: crds.keep + label: Keep CRDs + description: "Should Ambassador Edge Stack keep CRDs when the chart is uninstalled?" + type: boolean + required: false + default: "true" + group: "CRD Management" + show_if: "crds.enabled=true" + +### Deployment Management +- variable: daemonSet + label: Deploy as DaemonSet + description: "Deploy Ambassador Edge Stack as a DaemonSet? (Recommended: false)" + type: boolean + required: false + default: "true" + group: "Deployment Settings" +- variable: replicaCount + label: Replica Count + description: "How many replicas should Ambassador Edge Stack run? (Recommended: 3)" + type: int + required: false + default: "3" + group: "Deployment Settings" + min: 1 + max: 999 + show_if: "daemonSet=false" + +### Service Settings +- variable: service.type + label: Service Type + description: "Set the type of service, LoadBalancer (recommended), NodePort, or ClusterIP" + type: enum + required: false + default: "LoadBalancer" + group: "Service Settings" + options: + - "LoadBalancer" + - "ClusterIP" + - "NodePort" + +### Licensing +- variable: licenseKey.createSecret + label: "Create License Key Secret" + description: "Creates the license key secret using the License Key Data." + type: boolean + required: false + default: "true" + group: "License Settings" +- variable: licenseKey.value + label: "License Key Data" + description: "Specifies the license key to apply." + type: secret + required: false + default: "" + group: "License Settings" + show_if: "licenseKey.createSecret=true" + +### Service Catalog +- variable: agent.enabled + label: "Enable Service Catalog" + description: "Enables the Service Catalog agent for use at https://app.getambassador.io." + type: boolean + required: false + default: "true" + group: "Service Catalog" +- variable: agent.cloudConnectionToken + label: "Cloud Connection Token" + description: "Specifies the Token used to register a Cluster with the Service Catalog."
+ type: secret + required: false + default: "" + group: "Service Catalog" + show_if: "agent.enabled=true" + \ No newline at end of file diff --git a/charts/ambassador/ambassador/6.7.1100/templates/NOTES.txt b/charts/ambassador/ambassador/6.7.1100/templates/NOTES.txt new file mode 100644 index 000000000..359073a3f --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/NOTES.txt @@ -0,0 +1,60 @@ +------------------------------------------------------------------------------- +{{- if .Values.enableAES }} +Congratulations! You have successfully installed The Ambassador Edge Stack! + +{{- if empty .Values.licenseKey.value }} +------------------------------------------------------------------------------- +NOTE: You are currently running The Ambassador Edge Stack in EVALUATION MODE. + +Request a free community license key at https://SERVICE_IP/edge_stack_admin/#dashboard +to unlock all the features of The Ambassador Edge Stack and update the value of +licenseKey.value in your values.yaml file. +{{- end }} + +{{- if or .Values.authService.create .Values.rateLimit.create }} +------------------------------------------------------------------------------- +WARNING: + +With your installation of the Ambassador Edge Stack, you have created a: +{{ if .Values.authService.create }} +- AuthService named {{include "ambassador.fullname" .}}-auth +{{ end }} {{ if .Values.rateLimit.create }} +- RateLimitService named {{include "ambassador.fullname" .}}-ratelimit +{{ end }} +in the {{ include "ambassador.namespace" . }} namespace. + +Please ensure there is not another of these resources configured in your cluster. +If there is, please either remove the old resource or run + +helm upgrade {{ .Release.Name }} -n {{ .Release.Namespace }} --set authService.create=false --set rateLimit.create=false + +{{- end }} +{{- else }} + Congratulations! You've successfully installed Ambassador! + +------------------------------------------------------------------------------- +To get the IP address of Ambassador, run the following commands: + +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "ambassador.namespace" .}} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ambassador.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "ambassador.namespace" .}} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w --namespace {{ include "ambassador.namespace" .}} {{ include "ambassador.fullname" . }}' + + On GKE/Azure: + export SERVICE_IP=$(kubectl get svc --namespace {{ include "ambassador.namespace" .}} {{ include "ambassador.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + + On AWS: + export SERVICE_IP=$(kubectl get svc --namespace {{ include "ambassador.namespace" .}} {{ include "ambassador.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "ambassador.namespace" .}} -l "app={{ include "ambassador.name" .
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:80 +{{- end }} +{{- end }} + +For help, visit our Slack at http://a8r.io/Slack or view the documentation online at https://www.getambassador.io. diff --git a/charts/ambassador/ambassador/6.7.1100/templates/_helpers.tpl b/charts/ambassador/ambassador/6.7.1100/templates/_helpers.tpl new file mode 100644 index 000000000..58a1eb455 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/_helpers.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ambassador.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ambassador.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "ambassador.imagetag" -}} +{{- if .Values.image.fullImageOverride }} + {{- .Values.image.fullImageOverride }} +{{- else }} + {{- if hasKey .Values.image "tag" -}} + {{- .Values.image.tag }} + {{- else if .Values.enableAES }} + {{- .Values.image.aesTag }} + {{- else }} + {{- .Values.image.ossTag }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Set the image that should be used for ambassador. +Use fullImageOverride if present, +Then if the image repository is explicitly set, use "repository:image" +Otherwise, check if AES is enabled +Use AES image if AES is enabled, ambassador image if not +*/}} +{{- define "ambassador.image" -}} +{{- if .Values.image.fullImageOverride }} + {{- .Values.image.fullImageOverride }} +{{- else }} + {{- $repoName := "" }} + {{- $imageTag := "" }} + {{- if hasKey .Values.image "repository" -}} + {{- $repoName = .Values.image.repository }} + {{- else if .Values.enableAES }} + {{- $repoName = .Values.image.aesRepository }} + {{- else }} + {{- $repoName = .Values.image.ossRepository }} + {{- end -}} + {{- if hasKey .Values.image "tag" -}} + {{- $imageTag = .Values.image.tag }} + {{- else if .Values.enableAES }} + {{- $imageTag = .Values.image.aesTag }} + {{- else }} + {{- $imageTag = .Values.image.ossTag }} + {{- end -}} + {{- printf "%s:%s" $repoName $imageTag -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart namespace based on override value. +*/}} +{{- define "ambassador.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ambassador.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ambassador.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ambassador.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the RBAC to use +*/}} +{{- define "ambassador.rbacName" -}} +{{ default (include "ambassador.fullname" .) .Values.rbac.nameOverride }} +{{- end -}} + +{{/* +Define the http port of the Ambassador service +*/}} +{{- define "ambassador.servicePort" -}} +{{- range .Values.service.ports -}} +{{- if (eq .name "http") -}} +{{ default .port }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/admin-service.yaml b/charts/ambassador/ambassador/6.7.1100/templates/admin-service.yaml new file mode 100644 index 000000000..34db11f29 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/admin-service.yaml @@ -0,0 +1,64 @@ +{{- if .Values.adminService.create -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ambassador.fullname" . }}-admin + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/part-of: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + # Hard-coded label for Prometheus Operator ServiceMonitor + service: ambassador-admin + product: aes + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge Stack admin service for internal use and health checks." + a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: "None" + {{- with .Values.adminService.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.adminService.type }} + ports: + - port: {{ .Values.adminService.port }} + targetPort: admin + protocol: TCP + name: ambassador-admin + {{- if (and (eq .Values.adminService.type "NodePort") (not (empty .Values.adminService.nodePort))) }} + nodePort: {{ int .Values.adminService.nodePort }} + {{- end }} + - port: {{ .Values.adminService.snapshotPort }} + targetPort: {{ .Values.adminService.snapshotPort }} + protocol: TCP + name: ambassador-snapshot + selector: + {{- if .Values.service.selector }} + {{ toYaml .Values.service.selector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + {{- if eq .Values.adminService.type "LoadBalancer" }} + {{- if not (empty .Values.adminService.loadBalancerIP) }} + loadBalancerIP: {{ .Values.adminService.loadBalancerIP | quote }} + {{- end }} + {{- if not (empty .Values.adminService.loadBalancerSourceRanges) }} + loadBalancerSourceRanges: + {{- toYaml .Values.adminService.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-authservice.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-authservice.yaml new file mode 100644 index 000000000..b4c61bb0e --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-authservice.yaml @@ -0,0 +1,33 @@ +{{ if and .Values.authService.create .Values.enableAES }} +--- +apiVersion: getambassador.io/v2 +kind: AuthService +metadata: + name: {{ include "ambassador.fullname" . }}-{{ .Values.authService.deploymentExtraName | default "auth" }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-auth + {{- end }} + product: aes +spec: + proto: grpc + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + auth_service: 127.0.0.1:8500 + {{- if .Values.authService.optional_configurations }} + {{- toYaml .Values.authService.optional_configurations | nindent 2}} + {{- end }} +{{ end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-injector.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-injector.yaml new file mode 100644 index 000000000..03bd3bd95 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-injector.yaml @@ -0,0 +1,161 @@ +{{- if and .Values.enableAES .Values.servicePreview.enabled .Values.servicePreview.trafficAgent.injector.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ambassador.fullname" . }}-injector + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes + spec: + containers: + - name: webhook + {{- if .Values.servicePreview.trafficAgent.image.repository }} + image: "{{ .Values.servicePreview.trafficAgent.image.repository }}:{{ .Values.servicePreview.trafficAgent.image.tag | default .Values.image.tag }}" + {{- else }} + image: {{ include "ambassador.image" . }} + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: [ "aes-injector" ] + env: + - name: AGENT_MANAGER_NAMESPACE + value: "{{ include "ambassador.namespace" . }}" + - name: TRAFFIC_AGENT_IMAGE + value: "{{ .Values.servicePreview.trafficAgent.image.repository | default .Values.image.repository }}:{{ .Values.servicePreview.trafficAgent.image.tag | default .Values.image.tag }}" + - name: TRAFFIC_AGENT_AGENT_LISTEN_PORT + value: "{{ .Values.servicePreview.trafficAgent.port }}" + {{- if .Values.servicePreview.trafficAgent.singleNamespace }} + - name: TRAFFIC_AGENT_SERVICE_ACCOUNT_NAME + value: "{{ .Values.servicePreview.trafficAgent.serviceAccountName }}" + {{- end }} + ports: + - containerPort: 8443 + name: https + livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + volumeMounts: + - mountPath: /var/run/secrets/tls + name: tls + readOnly: true + volumes: + - name: tls + secret: + secretName: {{ include "ambassador.fullname" . }}-injector-tls +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ambassador.fullname" . }}-injector + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge Stack Service Preview Traffic Agent Sidecar injector." + a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: "None" +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - name: {{ include "ambassador.fullname" . }}-injector + port: 443 + targetPort: https +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ include "ambassador.fullname" . }}-injector-tls + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector-tls + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +type: Opaque +data: + {{ $ca := genCA (printf "%s-injector.%s.svc" (include "ambassador.fullname" .) 
(include "ambassador.namespace" .)) 365 -}} + crt.pem: {{ ternary (b64enc $ca.Cert) (b64enc (trim .Values.servicePreview.trafficAgent.injector.crtPEM)) (empty .Values.servicePreview.trafficAgent.injector.crtPEM) }} + key.pem: {{ ternary (b64enc $ca.Key) (b64enc (trim .Values.servicePreview.trafficAgent.injector.keyPEM)) (empty .Values.servicePreview.trafficAgent.injector.keyPEM) }} +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ include "ambassador.fullname" . }}-injector-webhook-config + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-injector-webhook-config + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +webhooks: +- name: {{ include "ambassador.fullname" . }}-injector.getambassador.io + clientConfig: + service: + name: {{ include "ambassador.fullname" . }}-injector + namespace: {{ include "ambassador.namespace" . }} + path: "/traffic-agent" + caBundle: {{ ternary (b64enc $ca.Cert) (b64enc (trim .Values.servicePreview.trafficAgent.injector.crtPEM)) (empty .Values.servicePreview.trafficAgent.injector.crtPEM) }} + failurePolicy: Ignore + rules: + - operations: ["CREATE"] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-internal.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-internal.yaml new file mode 100644 index 000000000..b210d0b20 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-internal.yaml @@ -0,0 +1,129 @@ +{{ if and .Values.createDevPortalMappings .Values.enableAES }} +--- +# Configure DevPortal +apiVersion: getambassador.io/v2 +kind: Mapping +metadata: + # This Mapping name is referenced by convention, it's important to leave as-is. + name: {{ include "ambassador.fullname" . }}-devportal + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-devportal + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + prefix: {{ .Values.devportal.docsPrefix }} + rewrite: "/docs/" + service: "127.0.0.1:8500" +--- +apiVersion: getambassador.io/v2 +kind: Mapping +metadata: + name: {{ include "ambassador.fullname" . }}-devportal-assets + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-devportal-assets + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + prefix: /documentation/(assets|styles)/(.*)(.css) + prefix_regex: true + regex_rewrite: + pattern: /documentation/(.*) + substitution: /docs/\1 + service: "127.0.0.1:8500" + add_response_headers: + cache-control: + value: "public, max-age=3600, immutable" + append: false +--- +apiVersion: getambassador.io/v2 +kind: Mapping +metadata: + # This Mapping name is what the demo uses. Sigh. + name: {{ include "ambassador.fullname" . }}-devportal-demo + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-devportal-demo + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + prefix: /docs/ + rewrite: "/docs/" + service: "127.0.0.1:8500" +--- +apiVersion: getambassador.io/v2 +kind: Mapping +metadata: + # This Mapping name is referenced by convention, it's important to leave as-is. + name: {{ include "ambassador.fullname" . }}-devportal-api + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-devportal-api + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + prefix: /openapi/ + rewrite: "" + service: "127.0.0.1:8500" +{{ end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-ratelimit.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-ratelimit.yaml new file mode 100644 index 000000000..fdb2ddbcd --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-ratelimit.yaml @@ -0,0 +1,29 @@ +{{ if and .Values.rateLimit.create .Values.enableAES }} +--- +apiVersion: getambassador.io/v2 +kind: RateLimitService +metadata: + name: {{ include "ambassador.fullname" . }}-{{ .Values.rateLimit.deploymentExtraName | default "ratelimit" }} + namespace: {{ include "ambassador.namespace" . 
}} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-ratelimit + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + service: 127.0.0.1:8500 +{{ end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-redis.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-redis.yaml new file mode 100644 index 000000000..e0bbe1931 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-redis.yaml @@ -0,0 +1,106 @@ +{{ if and .Values.redis.create .Values.enableAES }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ambassador.fullname" . }}-redis + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-redis + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- else }} + product: aes + {{- end }} + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge Stack Redis store for auth and rate limiting, among other things." + a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: "None" + {{- with .Values.redis.annotations.service }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - port: 6379 + targetPort: 6379 + selector: + {{- if .Values.redis.serviceSelector }} + {{ toYaml .Values.redis.serviceSelector | nindent 4 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-redis + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ambassador.fullname" . }}-redis + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-redis + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + annotations: + {{- toYaml .Values.redis.annotations.deployment | nindent 4}} +spec: + replicas: 1 + selector: + matchLabels: + {{- if .Values.redis.serviceSelector }} + {{ toYaml .Values.redis.serviceSelector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-redis + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + template: + metadata: + labels: + {{- if .Values.redis.serviceSelector }} + {{ toYaml .Values.redis.serviceSelector | nindent 8 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-redis + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + spec: + containers: + - name: redis + image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + resources: + {{- toYaml .Values.redis.resources | nindent 10 }} + restartPolicy: Always + {{- with .Values.redis.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.redis.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.redis.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{ end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/aes-secret.yaml b/charts/ambassador/ambassador/6.7.1100/templates/aes-secret.yaml new file mode 100644 index 000000000..9829d93fe --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/aes-secret.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.licenseKey.createSecret .Values.enableAES }} +apiVersion: v1 +kind: Secret +metadata: + {{- if ne .Values.deploymentTool "getambassador.io" }} + annotations: + helm.sh/resource-policy: keep + {{- end }} + {{- if .Values.licenseKey.annotations }} + {{- toYaml .Values.licenseKey.annotations | nindent 4 }} + {{- end }} + {{- if .Values.licenseKey.secretName }} + name: {{ .Values.licenseKey.secretName }} + {{- else }} + name: {{ include "ambassador.fullname" . }}-edge-stack + {{- end }} + namespace: {{ include "ambassador.namespace" . }} +type: Opaque +data: + license-key: {{- if .Values.licenseKey.value }} {{ .Values.licenseKey.value | b64enc }} {{- else }} "" {{- end }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/ambassador-agent.yaml b/charts/ambassador/ambassador/6.7.1100/templates/ambassador-agent.yaml new file mode 100644 index 000000000..0d70f4c8b --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/ambassador-agent.yaml @@ -0,0 +1,371 @@ +{{- if .Values.agent.enabled }} +{{- $allowAgent := false -}} + + {{- /* This next bit is ugly. */ -}} + {{- /* Case 1: "fullImageOverride" means don't bother checking the tag. */ -}} + {{- /* Case 2: Otherwise, if it's not a semver-style version number, */ -}} + {{- /* assume we have a power user and turn the agent on. */ -}} + {{- /* Case 3: Otherwise, if Edge Stack, we need at least 1.12.0. */ -}} + {{- /* Case 4: Otherwise, it's OSS and we need at 1.13.0. */ -}} + +{{- if .Values.image.fullImageOverride }} + {{- /* Case 1 */ -}} + {{- $allowAgent = true }} +{{- else if not (regexMatch "^\\d+\\.\\d+\\.\\d+$" (include "ambassador.imagetag" . 
)) }} + {{- /* Case 2 above: power user */ -}} + {{- $allowAgent = true }} +{{- else if and .Values.enableAES (ne (semver "1.12.0" | (semver (include "ambassador.imagetag" . )).Compare) -1) }} + {{- /* Case 3 above: Edge Stack 1.12.0+ */ -}} + {{- $allowAgent = true }} +{{- else if ne (semver "1.13.0" | (semver (include "ambassador.imagetag" . )).Compare) -1 }} + {{- /* Case 4 above: OSS 1.13.0+ */ -}} + {{- $allowAgent = true }} +{{- end }} + +{{- if $allowAgent }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ambassador.fullname" . }}-agent + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +{{- if .Values.docker.useImagePullSecret }} +imagePullSecrets: +- name: {{ .Values.docker.imagePullSecretName }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ include "ambassador.fullname" . }}-agent-config + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ambassador.fullname" . }}-agent-config +subjects: +- kind: ServiceAccount + name: {{ include "ambassador.fullname" . }}-agent + namespace: {{ include "ambassador.namespace" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ include "ambassador.fullname" . }}-agent-config + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: [""] + resources: [ "configmaps" ] + verbs: [ "get", "list", "watch" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ambassador.fullname" . }}-agent + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ambassador.fullname" . }}-agent +subjects: +- kind: ServiceAccount + name: {{ include "ambassador.fullname" . }}-agent + namespace: {{ include "ambassador.namespace" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent +rules: [] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-pods + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: [""] + resources: [ "pods"] + verbs: [ "get", "list", "watch" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-deployments + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: ["apps", "extensions"] + resources: [ "deployments" ] + verbs: [ "get", "list", "watch" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-endpoints + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: [""] + resources: [ "endpoints" ] + verbs: [ "get", "list", "watch" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-configmaps + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: [""] + resources: [ "configmaps" ] + verbs: [ "get", "list", "watch" ] +--- +{{- if .Values.agent.createArgoRBAC }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-rollouts + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: ["argoproj.io"] + resources: [ "rollouts" ] + verbs: [ "get", "list", "watch" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.fullname" . }}-agent-applications + labels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }}-agent + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: ["argoproj.io"] + resources: [ "applications" ] + verbs: [ "get", "list", "watch" ] +{{- end }} +{{ if ne .Values.agent.cloudConnectToken "" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "ambassador.fullname" . }}-agent-cloud-token + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-agent-cloud-token + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +data: + CLOUD_CONNECT_TOKEN: {{ .Values.agent.cloudConnectToken }} +{{ end }} + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ambassador.fullname" . }}-agent + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-agent + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-agent + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-agent + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + product: aes + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "ambassador.fullname" . }}-agent + containers: + - name: agent + image: {{ include "ambassador.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: [ "agent" ] + env: + - name: AGENT_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AGENT_CONFIG_RESOURCE_NAME + value: {{ include "ambassador.fullname" . }}-agent-cloud-token + - name: RPC_CONNECTION_ADDRESS + value: {{ .Values.agent.rpcAddress }} + - name: AES_SNAPSHOT_URL + value: "http://{{ include "ambassador.fullname" . }}-admin.{{ include "ambassador.namespace" . }}:{{ .Values.adminService.snapshotPort }}/snapshot-external" +{{- end }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/config.yaml b/charts/ambassador/ambassador/6.7.1100/templates/config.yaml new file mode 100644 index 000000000..b2c2d64bc --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/config.yaml @@ -0,0 +1,20 @@ +{{- if .Values.ambassadorConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ include "ambassador.fullname" . }}-file-config' + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +data: + ambassadorConfig: |- + {{- .Values.ambassadorConfig | nindent 4 }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/crd-delete.yaml b/charts/ambassador/ambassador/6.7.1100/templates/crd-delete.yaml new file mode 100644 index 000000000..0099dedf8 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/crd-delete.yaml @@ -0,0 +1,123 @@ +{{- if and .Values.crds.enabled (not .Values.crds.keep)}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ambassador.serviceAccountName" . }}-crd-delete + namespace: {{ include "ambassador.namespace" . }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded + "helm.sh/hook-weight": "1" + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.rbacName" . }}-crd-delete + namespace: {{ include "ambassador.namespace" . }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded + "helm.sh/hook-weight": "1" + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +rules: + - apiGroups: [ "apiextensions.k8s.io" ] + resources: [ "customresourcedefinitions" ] + verbs: ["get", "list", "watch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ambassador.rbacName" . }}-crd-delete + namespace: {{ include "ambassador.namespace" . }} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded + "helm.sh/hook-weight": "1" + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ambassador.rbacName" . }}-crd-delete +subjects: + - name: {{ include "ambassador.serviceAccountName" . }}-crd-delete + namespace: {{ include "ambassador.namespace" . }} + kind: ServiceAccount +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ambassador.fullname" . }}-crd-cleanup + namespace: {{ include "ambassador.namespace" . 
}} + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + "helm.sh/hook-weight": "3" + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +spec: + template: + metadata: + name: {{ include "ambassador.fullname" . }}-crd-cleanup + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + spec: + {{- if .Values.rbac.create }} + serviceAccountName: {{ include "ambassador.serviceAccountName" . }}-crd-delete + {{- end }} + containers: + - name: kubectl + image: "buoyantio/kubectl" + args: + - delete + - crds + - -l app.kubernetes.io/name=ambassador + restartPolicy: OnFailure +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/crds.yaml b/charts/ambassador/ambassador/6.7.1100/templates/crds.yaml new file mode 100644 index 000000000..3b3bf16d5 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.crds.create }} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }} +{{ $.Files.Get $path }} +--- +{{- end }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/deployment.yaml b/charts/ambassador/ambassador/6.7.1100/templates/deployment.yaml new file mode 100644 index 000000000..762cf2d9c --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/deployment.yaml @@ -0,0 +1,282 @@ +apiVersion: apps/v1 +{{- if .Values.daemonSet }} +kind: DaemonSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + {{- if .Values.deploymentNameOverride }} + name: {{ .Values.deploymentNameOverride }} + {{- else }} + name: {{ include "ambassador.fullname" . }} + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + {{- if .Values.deploymentLabels }} + {{- toYaml .Values.deploymentLabels | nindent 4 }} + {{- end }} + {{- if .Values.deploymentAnnotations }} + annotations: + {{- toYaml .Values.deploymentAnnotations | nindent 4 }} + {{- end }} +spec: +{{- if and (not .Values.autoscaling.enabled) (not .Values.daemonSet) }} + replicas: {{ .Values.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- if .Values.service.selector }} + {{ toYaml .Values.service.selector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + {{- if .Values.daemonSet }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + {{- toYaml .Values.deploymentStrategy | nindent 4}} + template: + metadata: + labels: + {{- if .Values.service.selector }} + {{ toYaml .Values.service.selector | nindent 8 }} + {{- end }} + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + app.kubernetes.io/instance: {{ .Release.Name }} + product: aes + {{- end }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + annotations: + {{- if ne .Values.deploymentTool "getambassador.io" }} + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- /* Check if .Values.securityContext is set for backwards compatibility */ -}} + {{- if .Values.securityContext -}} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else -}} + {{- with .Values.security.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end -}} + {{- if .Values.restartPolicy }} + restartPolicy: {{ .Values.restartPolicy }} + {{- end }} + serviceAccountName: {{ include "ambassador.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + volumes: + - name: ambassador-pod-info + downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + {{- if .Values.prometheusExporter.enabled }} + - name: stats-exporter-mapping-config + configMap: + name: {{ include "ambassador.fullname" . }}-exporter-config + items: + - key: exporterConfiguration + path: mapping-config.yaml + {{- end }} + {{- if .Values.ambassadorConfig }} + - name: ambassador-config + configMap: + name: {{ include "ambassador.fullname" . }}-file-config + items: + - key: ambassadorConfig + path: ambassador-config.yaml + {{- end }} + {{- if and .Values.licenseKey.createSecret .Values.enableAES }} + - name: {{ include "ambassador.fullname" . }}-edge-stack-secrets + secret: + {{- if .Values.licenseKey.secretName }} + secretName: {{ .Values.licenseKey.secretName }} + {{- else }} + secretName: {{ include "ambassador.fullname" . }}-edge-stack + {{- end }} + {{- end }} + {{- with .Values.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.initContainers }} + initContainers: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + {{- if .Values.prometheusExporter.enabled }} + - name: prometheus-exporter + image: "{{ .Values.prometheusExporter.repository }}:{{ .Values.prometheusExporter.tag }}" + imagePullPolicy: {{ .Values.prometheusExporter.pullPolicy }} + ports: + - name: metrics + containerPort: 9102 + - name: listener + containerPort: 8125 + args: + - --statsd.listen-udp=:8125 + - --web.listen-address=:9102 + - --statsd.mapping-config=/statsd-exporter/mapping-config.yaml + volumeMounts: + - name: stats-exporter-mapping-config + mountPath: /statsd-exporter/ + readOnly: true + resources: + {{- toYaml .Values.prometheusExporter.resources | nindent 12 }} + {{- end }} + - name: {{ if .Values.containerNameOverride }}{{ .Values.containerNameOverride }}{{ else }}{{ .Chart.Name }}{{ end }} + image: {{ include "ambassador.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + {{- range .Values.service.ports }} + - name: {{ .name }} + containerPort: {{ int .targetPort }} + {{- if .protocol }} + protocol: {{ .protocol }} + {{- end }} + {{- if .hostPort }} + hostPort: {{ .hostPort }} + {{- end }} + {{- end}} + - name: admin + containerPort: {{ .Values.adminService.port }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if and (or .Values.redis.create .Values.redisURL) (.Values.enableAES) }} + - name: REDIS_URL + {{- if .Values.redisURL }} + value: {{ .Values.redisURL }} + {{- else }} + value: {{ include "ambassador.fullname" . }}-redis:6379 + {{- end }} + {{- end }} + {{- if and .Values.licenseKey.secretName .Values.enableAES}} + - name: AMBASSADOR_AES_SECRET_NAME + value: {{ .Values.licenseKey.secretName }} + {{- end }} + {{- if .Values.prometheusExporter.enabled }} + - name: STATSD_ENABLED + value: "true" + - name: STATSD_HOST + value: "localhost" + {{- end }} + {{- if .Values.scope.singleNamespace }} + - name: AMBASSADOR_SINGLE_NAMESPACE + value: "YES" + {{- end }} + - name: AMBASSADOR_NAMESPACE + {{- if .Values.namespace }} + value: {{ .Values.namespace.name | quote }} + {{ else }} + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end -}} + {{- if .Values.redisEnv }} + {{ toYaml .Values.redisEnv | nindent 12 }} + {{- end }} + {{- if .Values.env }} + {{- range $key,$value := .Values.env }} + - name: {{ $key | upper | quote}} + value: {{ $value | quote}} + {{- end }} + {{- end }} + {{- if .Values.envRaw }} + {{- with .Values.envRaw }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + {{- with .Values.security.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /ambassador/v0/check_alive + port: admin + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + httpGet: + path: /ambassador/v0/check_ready + port: admin + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumeMounts: + - name: ambassador-pod-info + mountPath: /tmp/ambassador-pod-info + readOnly: true + {{- if .Values.ambassadorConfig }} + - name: ambassador-config + mountPath: /ambassador/ambassador-config/ambassador-config.yaml + subPath: ambassador-config.yaml + {{- end }} + {{- if and .Values.licenseKey.createSecret .Values.enableAES }} + - name: {{ include "ambassador.fullname" . }}-edge-stack-secrets + mountPath: /.config/ambassador + readOnly: true + {{- end }} + {{- with .Values.volumeMounts }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.sidecarContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 8 }} + dnsPolicy: {{ .Values.dnsPolicy }} + hostNetwork: {{ .Values.hostNetwork }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/exporter-config.yaml b/charts/ambassador/ambassador/6.7.1100/templates/exporter-config.yaml new file mode 100644 index 000000000..69b817f9d --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/exporter-config.yaml @@ -0,0 +1,23 @@ +{{- if .Values.prometheusExporter.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: '{{ include "ambassador.fullname" . }}-exporter-config' + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +data: + exporterConfiguration: +{{- if .Values.prometheusExporter.configuration }} | + {{- .Values.prometheusExporter.configuration | nindent 4 }} +{{- else }} '' +{{- end }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/hpa.yaml b/charts/ambassador/ambassador/6.7.1100/templates/hpa.yaml new file mode 100644 index 000000000..18cbbdbf6 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/hpa.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.autoscaling.enabled (not .Values.daemonSet) }} +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "ambassador.fullname" . }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ambassador.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/module.yaml b/charts/ambassador/ambassador/6.7.1100/templates/module.yaml new file mode 100644 index 000000000..6d481fef0 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/module.yaml @@ -0,0 +1,29 @@ +{{- if .Values.module }} +apiVersion: getambassador.io/v2 +kind: Module +metadata: + name: ambassador + namespace: {{ include "ambassador.namespace" . 
}} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + app.kubernetes.io/component: {{ include "ambassador.name" . }}-ratelimit + {{- end }} + product: aes +spec: + {{- if .Values.env }} + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- end }} + config: + {{- toYaml .Values.module | nindent 4 }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/namespace.yaml b/charts/ambassador/ambassador/6.7.1100/templates/namespace.yaml new file mode 100644 index 000000000..4535c74f2 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/namespace.yaml @@ -0,0 +1,8 @@ +{{- if .Values.createNamespace }} +apiVersion: v1 +kind: Namespace +metadata: + labels: + product: aes + name: {{ include "ambassador.namespace" . }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/oss-migration-test-service.yaml b/charts/ambassador/ambassador/6.7.1100/templates/oss-migration-test-service.yaml new file mode 100644 index 000000000..17f3c288d --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/oss-migration-test-service.yaml @@ -0,0 +1,33 @@ +{{- if .Values.enableTestService }} +apiVersion: v1 +kind: Service +metadata: + name: test-aes + namespace: {{ include "ambassador.namespace" . }} + labels: + product: aes +spec: + type: LoadBalancer + externalTrafficPolicy: Local + ports: + {{- range .Values.service.ports }} + - name: {{ .name }} + port: {{ int .port }} + {{- if .targetPort }} + targetPort: {{ int .targetPort }} + {{- end }} + {{- if .nodePort }} + nodePort: {{ int .nodePort }} + {{- end }} + {{- if .protocol }} + protocol: {{ .protocol }} + {{- end }} + {{- end}} + selector: + {{- if .Values.service.selector }} + {{ toYaml .Values.service.selector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/pdb.yaml b/charts/ambassador/ambassador/6.7.1100/templates/pdb.yaml new file mode 100644 index 000000000..4044fda60 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "ambassador.fullname" . }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "ambassador.name" . 
}} + app.kubernetes.io/part-of: {{ .Release.Name }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/podsecuritypolicy.yaml b/charts/ambassador/ambassador/6.7.1100/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..3da289039 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/podsecuritypolicy.yaml @@ -0,0 +1,25 @@ +{{ if .Values.security.podSecurityPolicy }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ambassador.fullname" . }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- with .Values.security.podSecurityPolicy.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- with .Values.security.podSecurityPolicy.spec }} +spec: + {{- toYaml . | nindent 2}} +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/ambassador/ambassador/6.7.1100/templates/projects-rbac.yaml b/charts/ambassador/ambassador/6.7.1100/templates/projects-rbac.yaml new file mode 100644 index 000000000..ed1087f4b --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/projects-rbac.yaml @@ -0,0 +1,75 @@ +{{- if and .Values.rbac.create .Values.registry.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- if .Values.scope.singleNamespace }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + name: {{ include "ambassador.rbacName" . }}-projects + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +rules: +- apiGroups: [""] + resources: [ "secrets", "services" ] + verbs: [ "get", "list", "create", "patch", "delete", "watch" ] +- apiGroups: ["apps"] + resources: [ "deployments" ] + verbs: [ "get", "list", "create", "patch", "delete", "watch" ] +- apiGroups: ["batch"] + resources: [ "jobs" ] + verbs: [ "get", "list", "create", "patch", "delete", "watch" ] +- apiGroups: [""] + resources: [ "pods" ] + verbs: [ "get", "list", "watch" ] +- apiGroups: [""] + resources: [ "pods/log" ] + verbs: [ "get" ] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- if .Values.scope.singleNamespace }} +kind: RoleBinding +{{- else }} +kind: ClusterRoleBinding +{{- end }} +metadata: + name: {{ include "ambassador.rbacName" . }}-projects + {{- if .Values.scope.singleNamespace }} + namespace: {{ include "ambassador.namespace" . }} + {{- end }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + {{- if .Values.scope.singleNamespace }} + kind: Role + {{- else }} + kind: ClusterRole + {{- end }} + name: {{ include "ambassador.rbacName" . }}-projects +subjects: + - name: {{ include "ambassador.serviceAccountName" . }} + namespace: {{ include "ambassador.namespace" . }} + kind: ServiceAccount +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/projects.yaml b/charts/ambassador/ambassador/6.7.1100/templates/projects.yaml new file mode 100644 index 000000000..21b021a9a --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/projects.yaml @@ -0,0 +1,412 @@ +{{- if .Values.registry.create }} +###################################################################### +# In-cluster Registry for Projects + +# This mapping will make every host function as a docker +# registry. It's not ideal to take over the "v2" mapping, but there +# are a number of constraints that make this the least worst option +# explored so far. These constraints are: +# +# - We need a registry where docker push/pull and similar (e.g. crictl +# push/pull) can work with no special client configuration since we +# don't control the clients and we can't expect our users to +# reconfigure their clusters to use a special push/pull +# configuration. +# +# - GKE's push/pull implementation (I think it's docker) and crictl +# push/pull (used by default in k3s clusters) have different default +# behaviors with respect to localhost registries. The docker +# implementation is very permissive, it will try both cleartext and +# TLS and it does not verify the TLS connection, so self-signed +# registries work fine. The crictl implementation is moving in this +# direction, but the version used in k3s (based on rancher's fork of +# containerd at v1.3.3) is not there yet. It only tries cleartext by +# default. +# +# - We want to minimize the requirements for users to have the +# access/understanding to create special DNS configurations +# (e.g. wildcard or a separate dns name for the registry). +# +# - You can configure the docker registry to have a prefix, +# e.g. <host>/<prefix>/v2/..., however without special +# configuration to override the defaults, clients can't push/pull +# from a registry served at a prefix. If your image is named +# <host>/<name>, the client will look for <host>/v2/... endpoints. +# +# Given all the prior constraints we are left with creating this +# mapping for all hosts. If this is a problem there are a few +# alternatives we could consider. We can provide a way to limit this +# mapping to only one host so they can have distinct hosts for their +# site and their registry. We could also look into creating a +# daemonset that binds to localhost and proxies cleartext to +# TLS. Based on what I know of GKE and k3s its a good guess that this +# would accommodate both of them, but possibly not other clusters with +# different configurations. +# +# Another reason to lean towards an externally accessible registry is +# that there are likely some people that would want this as a feature +# so they can docker push/pull images from other systems into/out of +# the builtin registry. 
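+# For example, a client outside the cluster could push with plain docker +# commands (an illustrative sketch only; <ambassador-host> is a placeholder +# for any hostname that routes to this Ambassador installation, and my-app +# is a placeholder image name): +# +#   docker tag my-app <ambassador-host>/my-app:dev +#   docker push <ambassador-host>/my-app:dev +# +#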
While it's true that security minded people +# might not like having this registry externally accessible, it's also +# quite likely those people would want to run their own fancy registry +# that scans/audits images, etc. The focus for RtC is really a smooth +# out of the box experience that functions end-to-end without +# requiring you to build your own platform. For more security minded +# people we should expect to eventually be able to configure an +# external registry and/or turn off the builtin one. +--- +apiVersion: getambassador.io/v2 +kind: Mapping +metadata: + name: {{ include "ambassador.fullname" . }}-registry + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-registry + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +spec: + prefix: /v2/ + rewrite: /v2/ + {{- if .Values.registry.resourceNameOverride }} + service: https://{{ .Values.registry.resourceNameOverride }} + {{- else }} + service: https://{{ include "ambassador.fullname" . }}-registry + {{- end }} + timeout_ms: 300000 +--- +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }} + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-registry + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge internal image registry." + a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: "None" +spec: + type: ClusterIP + selector: + {{- if .Values.registry.serviceSelectors }} + {{ toYaml .Values.registry.serviceSelector | nindent 4 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-registry + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + ports: + - port: 443 + targetPort: 5000 + +# The registry deployment. The deployment includes a persistent volume +# mount for storing images, a config-map mount for customizing the +# registry configuration, and a secret mounted for tls. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }} + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry + {{- end }} + namespace: {{ include "ambassador.namespace" . 
}} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + app: registry +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 0 + selector: + matchLabels: + {{- if .Values.registry.serviceSelectors }} + {{ toYaml .Values.registry.serviceSelector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-registry + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + template: + metadata: + annotations: + foo: "5" + labels: + {{- if .Values.registry.serviceSelectors }} + {{ toYaml .Values.registry.serviceSelector | nindent 8 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.fullname" . }}-registry + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + spec: + containers: + - name: registry + image: registry:2 + ports: + - containerPort: 5000 + volumeMounts: + - mountPath: /var/lib/registry + name: registry-data + - name: registry-config + mountPath: /etc/docker/registry + - name: registry-tls + mountPath: /etc/tls + volumes: + - name: registry-config + configMap: + # Provide the name of the ConfigMap containing the files you want + # to add to the container + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }}-config + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry-config + {{- end }} + - name: registry-data + persistentVolumeClaim: + {{- if .Values.registry.resourceNameOverride }} + claimName: {{ .Values.registry.resourceNameOverride }}-data + {{- else }} + claimName: {{ include "ambassador.fullname" . }}-registry-data + {{- end }} + - name: registry-tls + secret: + {{- if .Values.registry.resourceNameOverride }} + secretName: {{ .Values.registry.resourceNameOverride }}-tls + {{- else }} + secretName: {{ include "ambassador.fullname" . }}-registry-tls + {{- end }} + +# The configuration file for our registry. +--- +apiVersion: v1 +kind: ConfigMap +metadata: + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }}-config + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry-config + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-registry + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +data: + config.yml: | + version: 0.1 + log: + fields: + service: registry + storage: + cache: + blobdescriptor: inmemory + filesystem: + rootdirectory: /var/lib/registry + http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] + tls: + certificate: /etc/tls/tls.crt + key: /etc/tls/tls.key + health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 + +# The persistent volume for our registry. 
+--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }}-data + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry-data + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-registry + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + +# The self-signed tls secret for our registry. We should look into +# generating this on install with a job. +--- +apiVersion: v1 +kind: Secret +metadata: + {{- if .Values.registry.resourceNameOverride }} + name: {{ .Values.registry.resourceNameOverride }}-tls + {{- else }} + name: {{ include "ambassador.fullname" . }}-registry-tls + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-registry + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +type: kubernetes.io/tls +data: + tls.crt: | + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVEekNDQXZlZ0F3SUJBZ0lVSVZrWlJGSkVJ + VCtOTlJiMFJ0TkxwZFp5TTVnd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2daWXhDekFKQmdOVkJBWVRB + bFZUTVJZd0ZBWURWUVFJREExTllYTnpZV05vZFhObGRIUnpNUk13RVFZRApWUVFIREFwVGIyMWxj + blpwYkd4bE1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuCmFXNWxa + WEpwYm1jeEVUQVBCZ05WQkFNTUNISmxaMmx6ZEhKNU1SNHdIQVlKS29aSWh2Y05BUWtCRmc5a1pY + WkEKWkdGMFlYZHBjbVV1YVc4d0hoY05NakF3TVRNd01qRXdNVFV5V2hjTk1qRXdNVEk1TWpFd01U + VXlXakNCbGpFTApNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQWdNRFUxaGMzTmhZMmgxYzJWMGRI + TXhFekFSQmdOVkJBY01DbE52CmJXVnlkbWxzYkdVeEVUQVBCZ05WQkFvTUNFUmhkR0YzYVhKbE1S + UXdFZ1lEVlFRTERBdEZibWRwYm1WbGNtbHUKWnpFUk1BOEdBMVVFQXd3SWNtVm5hWE4wY25reEhq + QWNCZ2txaGtpRzl3MEJDUUVXRDJSbGRrQmtZWFJoZDJseQpaUzVwYnpDQ0FTSXdEUVlKS29aSWh2 + Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTFRtZ21wb2szVVdCVkhqCjFqb2R5eG9LZFJad09Y + WnhiZ25ITXlMa2xxLzUydGdmTEJmVlU1TzB2aE5iVm5vcEVSRWdWV0pTd3dlN0dOS0EKSjlaWWxC + Qlc1Q1U5Q3FNalU2TTVOdTdiVWRQblNyNGRFSFlWcmhEakJYcVpDUElEaFhZS2ZZYWh0YlB4cis1 + egpueS9qQktKU2JwM3RWU3d5SEhsY3JJNHdOU2R1Q2x5UFplOFR0Q2hGQUxhcU5rWUMvclNGK0w0 + SWcwZmY1N0duClpFVmsyZDJja09Xbkp6akRXMGhYL3FUcXhUKzZwV2tUQThWQ0FVS2FabEY5VkRK + c20rOW1XM2dBWmZ5NWdFWloKajcvaktqNTd5R1BUR2xWQXhra2J2WlJJVWQ5LzVkVmE3V1RCYnlR + dkxvOEkyWWQ3S1h6Y3BjcElpS2hRREdPQQpHbGVoa2JVQ0F3RUFBYU5UTUZFd0hRWURWUjBPQkJZ + RUZGTDV5NnNIb09tV0FRWVVGano4VHNETGFnUTdNQjhHCkExVWRJd1FZTUJhQUZGTDV5NnNIb09t + V0FRWVVGano4VHNETGFnUTdNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHcKRFFZSktvWklodmNOQVFF + TEJRQURnZ0VCQUFZdHlnNDNDTEJsbVlvY0NkSjVpSlF0NTR0anFGU2hIMzdFd3h4WQp1QVExRHRW + a0Q3QngzUURZZ1cxeU1QYzFTRDhYenFUcWxjQUlOQTZwdVB0SlNPcC8wUUVqVFJSMkFSZFF5VURI + 
ClZOZEZzcHp5MGRnbllqOXY2ckl4akdOazVHZXI3cUp4TURaUUY0dC82NHZLYWNyOHZOQ3dnSmI5 + WEZaMTBjNlEKdVNSNVVVN1pMTWJPeWd4a0hPQStMMXp3S2pSaXZUb2ZMbExPOURQNUJwMk9hOGgr + TmZhVkJ4ZHFUS2l0UzFaOApnUnZhOTFuRHZwTjl5aHBiNFJVN2FoWW9tWGF4VE5ZVEJxVE1uZWhE + aWhPQjdBS2Z0VVErdjJWZ2VlM1FxaGJ4CjRUSlJpTTUxR2VIWEtoVWw5ZXBxRnBlYllIa1BnU1ln + bU1OUy9aT3JSWmFxajVRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: | + LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZB + QVNDQktZd2dnU2lBZ0VBQW9JQkFRQzA1b0pxYUpOMUZnVlIKNDlZNkhjc2FDblVXY0RsMmNXNEp4 + ek1pNUphditkcllIeXdYMVZPVHRMNFRXMVo2S1JFUklGVmlVc01IdXhqUwpnQ2ZXV0pRUVZ1UWxQ + UXFqSTFPak9UYnUyMUhUNTBxK0hSQjJGYTRRNHdWNm1RanlBNFYyQ24yR29iV3o4YS91CmM1OHY0 + d1NpVW02ZDdWVXNNaHg1WEt5T01EVW5iZ3BjajJYdkU3UW9SUUMycWpaR0F2NjBoZmkrQ0lOSDMr + ZXgKcDJSRlpObmRuSkRscHljNHcxdElWLzZrNnNVL3VxVnBFd1BGUWdGQ21tWlJmVlF5Ykp2dlps + dDRBR1g4dVlCRwpXWSsvNHlvK2U4aGoweHBWUU1aSkc3MlVTRkhmZitYVld1MWt3VzhrTHk2UENO + bUhleWw4M0tYS1NJaW9VQXhqCmdCcFhvWkcxQWdNQkFBRUNnZ0VBWUxiMGRxdGVXclRoTnp6V0pk + QVQ2K0kzWXoyd214QmR3a0NMcUZZSjhoOWsKenpNclFicTlxalJ4Z3F2TWVoZEdscDl3eHRaMGlz + ZU9wOHY0Z0hKdkJxVk42RkxRUXhQNS9VUHppSlFkRld1TQozRU54cjVBN3RhK0tHRmVGSHM2Zkpk + TEo5WmF6TEhkRWxmbWUyOTFGZHZzWFJMdkVVNUtmQW90M2ZiVnNWWjFxCnRucVIzY0dET3JVQ00v + ZzJKZmVBYk5wSUJjTnlCV0diOGRQbm5SaHZRNW5YN1ozUnJiNTlhQnhOcldCSkFkbnEKOUtkS3BR + UmU4cjBiRGJ0WVZQamxXRldpOVluWVQ0WHpQOG9TU0t5a3R4TWZraEM2dlVKb0gwNHFOSmRkWjVM + WAozWjRKUm14RnlUZU1rUG0xa2dnSVVRZGJhRWp1WG0rOThOeXVkZitKcVFLQmdRRGx6SS9XMzZM + am1pRE9MSDVUCnFhZTFnazNMV2lTY3hwZzRhazEyenhLSlkrWUJiNnc4UG5EVmlvY2tPa0lsSERh + V0xzQ2VpRkJsM2lPSDlUWWcKQm9iY3JVZVNUbWdOaUNqSlpIWVhIUlY1TEN2bGE0UkhhcXNMWG43 + elptTE5GVW9YRlhaTkoyQzlqUEp5TStyQQpqOWJLWlFvQTF2NC9qOUdMTXN3eEJZem1pd0tCZ1FE + SmhxNDhrYmV0MlRTRFhyMUxuY3FMVU9wak1hQmNyOEJKCnpDNlBwK3F0ck01QVE1RnkwaHRoV2Zn + bDkzZU5vMWRQT2pCRDZ6amIyd2dNSHhBR2w1V0pIN005enFBSWJSaW0KbDFNcmsrUkprbUVGeUls + cU95TG9jNlg0V1pPN1BwejZPQkdWTExGOFlBR09UcldaRzZwUStDeVJWN3hHUS9PWAo4QlN5UVVh + d3Z3S0JnRWFXWG55dmQxYVlpb2txUzZlaFRuM0h4K08yRGRjR2ZjMmVnYXNFRW5xWGNCaHkyQ0l0 + ClAvV29OcmpmR0dCVDJVU3FtY3BZcnZHTG1iaHlqeXlwTkpYbXVEeHR6ektRNTQ1dFNJVHpEeHlJ + Zi9kWjNta2QKaityUEhRbmhJbXBDcHQ2T1hpZDIrQlZoalR1ZFRQZlhkeS8yZDJzb256S2hGOG05 + VWRHaEZkWGZBb0dBRkZ0QwpabVBoeGZIVzJCNU55TUdib0E4QVhoeTVNaU9lck5XdkxsdXIzUGRE + cmtJbEF4QXVLOXRHc2E4WnFIa0RiTUZYCjlzUmY3ZlZtRHJOa2p3WG8yUDBXd2Z1Sk50Q3VXTVdZ + WlNKL1FOOUVaYTBvRkU3ODY3WWk0YjlLcVBOZUwvaFIKN2x1aFlncmduVnRlQktWQ3d3TU9uVy9i + V00yc1lZQ2kxbzY1Y1VrQ2dZQUR4SUJmOGZUOURDS0NaZ1FvQXNDYwpvSzcvdzdDYk1hOEp5TjZa + ZDRiSlIrSzRzUEtQekd2M3dEandxRzFTRkN6UU1FR01mOWt6TWFYb09XdzNaN2NCCklIZTJDUXFF + N2NZdW1LYjFkOTFueU1qMVdQVC9CWEJKZzB3aUNMV0RjakdQR0xNWTJyeGsvMWwzL2xjKy9WVkcK + NjRZZUh1YlllOE9Iemp5UEZGSnJZdz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + +###################################################################### +# Project Controller +# +# Comment this out if you want to disable the micro CI/CD functionality: +--- +apiVersion: getambassador.io/v2 +kind: ProjectController +metadata: + {{- if .Values.registry.projectControllerName }} + name: {{ .Values.registry.projectControllerName }} + {{- else }} + name: {{ include "ambassador.fullname" . }}-projectcontroller + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }}-projectcontroller + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + projects.getambassador.io/ambassador_id: {{ if hasKey .Values.env "AMBASSADOR_ID" }}{{ .Values.env.AMBASSADOR_ID | quote }}{{ else }}default{{ end }} + product: aes +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/rbac.yaml b/charts/ambassador/ambassador/6.7.1100/templates/rbac.yaml new file mode 100644 index 000000000..1077c248d --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/rbac.yaml @@ -0,0 +1,200 @@ +{{- if .Values.rbac.create -}} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.rbacName" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }} +rules: [] +--- +# CRDs are cluster scoped resources, so they need to be in a cluster role, +# even if ambassador is running in single namespace mode +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ include "ambassador.rbacName" . }}-crd + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . }} +rules: + - apiGroups: [ "apiextensions.k8s.io" ] + resources: [ "customresourcedefinitions" ] + verbs: ["get", "list", "watch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- if .Values.scope.singleNamespace }} +kind: Role +metadata: + name: {{ include "ambassador.rbacName" . }} + namespace: {{ include "ambassador.namespace" . }} +{{- else }} +kind: ClusterRole +metadata: + name: {{ include "ambassador.rbacName" . }}-watch +{{- end }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes + rbac.getambassador.io/role-group: {{ include "ambassador.rbacName" . 
}} +rules: + - apiGroups: [""] + resources: + - namespaces + - services + - secrets + - endpoints + verbs: ["get", "list", "watch"] + + - apiGroups: [ "getambassador.io" ] + resources: [ "*" ] + verbs: ["get", "list", "watch", "update", "patch", "create", "delete" ] + + - apiGroups: [ "getambassador.io" ] + resources: [ "mappings/status" ] + verbs: ["update"] + + - apiGroups: [ "networking.internal.knative.dev" ] + resources: [ "clusteringresses", "ingresses" ] + verbs: ["get", "list", "watch"] + + - apiGroups: [ "networking.x-k8s.io" ] + resources: [ "*" ] + verbs: ["get", "list", "watch"] + + - apiGroups: [ "networking.internal.knative.dev" ] + resources: [ "ingresses/status", "clusteringresses/status" ] + verbs: ["update"] + + - apiGroups: [ "extensions", "networking.k8s.io" ] + resources: [ "ingresses", "ingressclasses" ] + verbs: ["get", "list", "watch"] + + - apiGroups: [ "extensions", "networking.k8s.io" ] + resources: [ "ingresses/status" ] + verbs: ["update"] + + {{- if .Values.enableAES }} + + - apiGroups: [""] + resources: [ "secrets" ] + verbs: ["get", "list", "watch", "create", "update"] + + - apiGroups: [""] + resources: [ "events" ] + verbs: ["get", "list", "watch", "create", "patch"] + + - apiGroups: ["coordination.k8s.io"] + resources: [ "leases" ] + verbs: ["get", "create", "update"] + + - apiGroups: [""] + resources: [ "endpoints" ] + verbs: ["get", "list", "watch", "create", "update"] + {{- end }} + + {{- if or .Values.rbac.podSecurityPolicies .Values.security.podSecurityPolicy }} + + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- if .Values.rbac.podSecurityPolicies }} + {{- toYaml .Values.rbac.podSecurityPolicies | nindent 6 }} + {{- end }} + {{- if .Values.security.podSecurityPolicy }} + - {{ include "ambassador.fullname" . }} + {{- end }} + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ambassador.rbacName" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ambassador.rbacName" . }} +subjects: + - name: {{ include "ambassador.serviceAccountName" . }} + namespace: {{ include "ambassador.namespace" . }} + kind: ServiceAccount +--- +{{- if .Values.scope.singleNamespace }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ include "ambassador.rbacName" . }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ambassador.rbacName" . 
}} +subjects: + - name: {{ include "ambassador.serviceAccountName" . }} + namespace: {{ include "ambassador.namespace" . }} + kind: ServiceAccount +{{- end }} +{{- end -}} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/resolvers.yaml b/charts/ambassador/ambassador/6.7.1100/templates/resolvers.yaml new file mode 100644 index 000000000..43aa5ace2 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/resolvers.yaml @@ -0,0 +1,45 @@ +{{- if .Values.resolvers.endpoint.create }} +--- +apiVersion: getambassador.io/v2 +kind: KubernetesEndpointResolver +metadata: + name: {{ .Values.resolvers.endpoint.name }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +{{- if hasKey .Values.env "AMBASSADOR_ID" }} +spec: + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} +{{- end }} +{{- end }} +{{- if .Values.resolvers.consul.create }} +--- +apiVersion: getambassador.io/v2 +kind: ConsulResolver +metadata: + name: {{ .Values.resolvers.consul.name }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} +spec: + {{- if hasKey .Values.env "AMBASSADOR_ID" }} + ambassador_id: {{ .Values.env.AMBASSADOR_ID | quote }} + {{- end }} + {{- toYaml .Values.resolvers.consul.spec | nindent 2 }} +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/service.yaml b/charts/ambassador/ambassador/6.7.1100/templates/service.yaml new file mode 100644 index 000000000..a541c2234 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/service.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.service.nameOverride }} + name: {{ .Values.service.nameOverride }} + {{- else }} + name: {{ include "ambassador.fullname" . }} + {{- end }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + app.kubernetes.io/component: ambassador-service + product: aes + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development." 
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: {{ include "ambassador.fullname" . }}-redis.{{ include "ambassador.namespace" . }} +{{- if .Values.service.annotations }} + {{- range $key, $value := .Values.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" + {{- end }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: "{{ .Values.service.externalTrafficPolicy }}" + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml .Values.service.sessionAffinityConfig | nindent 4 }} + {{- end }} + ports: + {{- range .Values.service.ports }} + - name: {{ .name }} + port: {{ int .port }} + {{- if .targetPort }} + targetPort: {{ int .targetPort }} + {{- end }} + {{- if .nodePort }} + nodePort: {{ int .nodePort }} + {{- end }} + {{- if .protocol }} + protocol: {{ .protocol }} + {{- end }} + {{- end}} + selector: + {{- if .Values.service.selector }} + {{ toYaml .Values.service.selector | nindent 6 }} + {{- else }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: + {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/serviceaccount.yaml b/charts/ambassador/ambassador/6.7.1100/templates/serviceaccount.yaml new file mode 100644 index 000000000..90c8ef085 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/serviceaccount.yaml @@ -0,0 +1,24 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ambassador.serviceAccountName" . }} + namespace: {{ include "ambassador.namespace" . }} + labels: + {{- if ne .Values.deploymentTool "getambassador.io" }} + app.kubernetes.io/name: {{ include "ambassador.name" . }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + {{- end }} + product: aes +{{- if .Values.docker.useImagePullSecret }} +imagePullSecrets: +- name: {{ .Values.docker.imagePullSecretName }} +{{- end }} +{{- end -}} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/servicemonitor.yaml b/charts/ambassador/ambassador/6.7.1100/templates/servicemonitor.yaml new file mode 100644 index 000000000..b2c8122a1 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/servicemonitor.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.adminService.create .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ambassador.fullname" . }} + namespace: {{ include "ambassador.namespace" . 
}} + labels: + app: {{ include "ambassador.name" . }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- toYaml .Values.metrics.serviceMonitor.selector | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: ambassador-admin + path: /metrics + {{- with .Values.metrics.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "ambassador.namespace" . }} + selector: + matchLabels: + service: ambassador-admin +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/tests/test-ready.yaml b/charts/ambassador/ambassador/6.7.1100/templates/tests/test-ready.yaml new file mode 100644 index 000000000..ec96235f7 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/tests/test-ready.yaml @@ -0,0 +1,24 @@ +{{- if and (.Values.test.enabled) (not .Values.daemonSet) }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "ambassador.fullname" . }}-test-ready" + labels: + app.kubernetes.io/name: {{ include "ambassador.name" . }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: {{ .Values.test.image | default "busybox" }} + command: ['wget'] + args: ['{{ include "ambassador.fullname" . }}:{{ include "ambassador.servicePort" . }}/ambassador/v0/check_ready'] + restartPolicy: Never +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/templates/traffic-agent-rbac.yaml b/charts/ambassador/ambassador/6.7.1100/templates/traffic-agent-rbac.yaml new file mode 100644 index 000000000..783c0aed6 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/traffic-agent-rbac.yaml @@ -0,0 +1,135 @@ +{{- if and .Values.enableAES .Values.servicePreview.enabled }} +{{- if .Values.servicePreview.trafficAgent.singleNamespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + annotations: + # Required because Helm creates secrets before ServiceAccount, but service-account-token depends on an existing SA. + "helm.sh/hook": "pre-install" + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +--- +## Create a service-account-token for traffic-agent with a matching name. +## Since the ambassador-injector will use this token name, it must be deterministic and not auto-generated. +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . 
}} + annotations: + kubernetes.io/service-account.name: traffic-agent +type: kubernetes.io/service-account-token +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +rules: + - apiGroups: [""] + resources: [ "namespaces", "services", "secrets" ] + verbs: ["get", "list", "watch"] + - apiGroups: [ "getambassador.io" ] + resources: [ "*" ] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ambassador.rbacName" . }} +subjects: + - name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + kind: ServiceAccount +{{- else }} +## If we install Service Preview cluster-wide, this means we can't use the 'traffic-agent' ServiceAccount +## as it does not exist in every namespace. We must instead grant new Roles to all ServiceAccounts (cluster-wide). +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +rules: + - apiGroups: [""] + resources: [ "namespaces", "services", "secrets" ] + verbs: ["get", "list", "watch"] + - apiGroups: [ "getambassador.io" ] + resources: [ "*" ] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.servicePreview.trafficAgent.serviceAccountName }} +subjects: + - name: system:serviceaccounts + kind: Group + apiGroup: rbac.authorization.k8s.io +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/ambassador/ambassador/6.7.1100/templates/traffic-manager.yaml b/charts/ambassador/ambassador/6.7.1100/templates/traffic-manager.yaml new file mode 100644 index 000000000..922bc5df4 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/templates/traffic-manager.yaml @@ -0,0 +1,190 @@ +{{- if and .Values.enableAES .Values.servicePreview.enabled }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- if .Values.scope.singleNamespace }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +rules: + - apiGroups: [""] + resources: ["namespaces", "services", "pods", "secrets"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +{{- if .Values.scope.singleNamespace }} +kind: RoleBinding +{{- else }} +kind: ClusterRoleBinding +{{- end }} +metadata: + name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +roleRef: + apiGroup: rbac.authorization.k8s.io + {{- if .Values.scope.singleNamespace }} + kind: Role + {{- else }} + kind: ClusterRole + {{- end }} + name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} +subjects: + - kind: ServiceAccount + name: {{ .Values.servicePreview.trafficManager.serviceAccountName }} + namespace: {{ include "ambassador.namespace" . 
}} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: telepresence-proxy + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: telepresence-proxy + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + product: aes +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: telepresence-proxy + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: telepresence-proxy + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + containers: + - name: telepresence-proxy + {{- if .Values.servicePreview.trafficManager.image.repository }} + image: "{{ .Values.servicePreview.trafficManager.image.repository }}:{{ .Values.servicePreview.trafficManager.image.tag | default .Values.image.tag }}" + {{- else }} + image: {{ include "ambassador.image" . }} + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: [ "traffic-manager" ] + env: + {{- if .Values.scope.singleNamespace }} + - name: AMBASSADOR_SINGLE_NAMESPACE + value: "true" + {{- end }} + - name: AMBASSADOR_NAMESPACE + {{- if .Values.namespace }} + value: {{ .Values.namespace.name | quote }} + {{ else }} + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end -}} + {{- if or .Values.redis.create .Values.redisURL }} + - name: REDIS_URL + {{- if .Values.redisURL }} + value: {{ .Values.redisURL }} + {{- else }} + value: {{ include "ambassador.fullname" . }}-redis:6379 + {{- end }} + {{- end }} + ports: + - name: sshd + containerPort: 8022 + volumeMounts: + - mountPath: /tmp/ambassador-pod-info + name: pod-info + restartPolicy: Always + terminationGracePeriodSeconds: 0 + volumes: + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + name: pod-info + serviceAccountName: {{ .Values.servicePreview.trafficManager.serviceAccountName }} +--- +apiVersion: v1 +kind: Service +metadata: + name: telepresence-proxy + namespace: {{ include "ambassador.namespace" . }} + labels: + app.kubernetes.io/name: telepresence-proxy + app.kubernetes.io/part-of: {{ .Release.Name }} + helm.sh/chart: {{ include "ambassador.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.deploymentTool }} + app.kubernetes.io/managed-by: {{ .Values.deploymentTool }} + {{- else }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- end }} + annotations: + a8r.io/owner: "Ambassador Labs" + a8r.io/repository: github.com/datawire/ambassador + a8r.io/description: "The Ambassador Edge Stack Service Preview Telepresence Proxy." 
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/chat: http://a8r.io/Slack + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/support: https://www.getambassador.io/about-us/support/ + a8r.io/dependencies: "None" +spec: + type: ClusterIP + clusterIP: None + selector: + app.kubernetes.io/name: telepresence-proxy + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - name: sshd + protocol: TCP + port: 8022 + - name: api + protocol: TCP + port: 8081 +{{- end }} diff --git a/charts/ambassador/ambassador/6.7.1100/values.yaml b/charts/ambassador/ambassador/6.7.1100/values.yaml new file mode 100644 index 000000000..17ebb9ab0 --- /dev/null +++ b/charts/ambassador/ambassador/6.7.1100/values.yaml @@ -0,0 +1,521 @@ +# Default values for ambassador. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Manually set metadata for the Release. +# +# Defaults to .Chart.Name +nameOverride: '' +# Defaults to .Release.Name-.Chart.Name unless .Release.Name contains "ambassador" +fullnameOverride: '' +# Defaults to .Release.Namespace +namespaceOverride: '' + +replicaCount: 3 +daemonSet: false + +# This will enable the test-ready Pod (https://github.com/datawire/ambassador-chart/blob/master/templates/tests/test-ready.yaml). +# It will spawn a busybox container to call Ambassador's check_ready endpoint to validate it is working correctly. +test: + enabled: true + image: busybox + +# Enable autoscaling using HorizontalPodAutoscaler +# daemonSet: true, autoscaling will be disabled +autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 60 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 60 + +podDisruptionBudget: {} + +# namespace: + # name: default + +# Additional container environment variable +# Uncomment or add additional environment variables for the container here. +env: {} + # Exposing statistics via StatsD + # STATSD_ENABLED: true + # STATSD_HOST: statsd-sink + # sets the minimum number of seconds between Envoy restarts + # AMBASSADOR_RESTART_TIME: 15 + # sets the number of seconds that the Envoy will wait for open connections to drain on a restart + # AMBASSADOR_DRAIN_TIME: 5 + # sets the number of seconds that Ambassador will wait for the old Envoy to clean up and exit on a restart + # AMBASSADOR_SHUTDOWN_TIME: 10 + # labels Ambassador with an ID to allow for configuring multiple Ambassadors in a cluster + # AMBASSADOR_ID: default + +# Additional container environment variable in raw YAML format +# Uncomment or add additional environment variables for the container here. +envRaw: {} +# - name: REDIS_PASSWORD +# value: password +# valueFrom: +# secretKeyRef: +# name: redis-password +# key: password +# - name: POD_IP +# valueFrom: +# fieldRef: +# fieldPath: status.podIP + +imagePullSecrets: [] + +security: + # Security Context for all containers in the pod. 
+ # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podsecuritycontext-v1-core + podSecurityContext: + runAsUser: 8888 + # Security Context for the Ambassador container specifically + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core + containerSecurityContext: + allowPrivilegeEscalation: false + # A basic PodSecurityPolicy to ensure Ambassador is running with appropriate security permissions + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + # + # A set of reasonable defaults is outlined below. This is not created by default as it should only + # be created by a one Release. If you want to use the PodSecurityPolicy in the chart, create it in + # the "master" Release and then leave it unset in all others. Set the `rbac.podSecurityPolicies` + # in all non-"master" Releases. + podSecurityPolicy: {} + # # Add AppArmor and Seccomp annotations + # # https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + # annotations: + # spec: + # seLinux: + # rule: RunAsAny + # supplementalGroups: + # rule: 'MustRunAs' + # ranges: + # # Forbid adding the root group. + # - min: 1 + # max: 65535 + # fsGroup: + # rule: 'MustRunAs' + # ranges: + # # Forbid adding the root group. + # - min: 1 + # max: 65535 + # privileged: false + # allowPrivilegeEscalation: false + # runAsUser: + # rule: MustRunAsNonRoot + +image: + ossTag: 1.13.8 + aesTag: 1.13.8 + pullPolicy: IfNotPresent + ossRepository: docker.io/datawire/ambassador + aesRepository: docker.io/datawire/aes +dnsPolicy: ClusterFirst +hostNetwork: false + +service: + type: LoadBalancer + + # Note that target http ports need to match your ambassador configurations service_port + # https://www.getambassador.io/reference/modules/#the-ambassador-module + ports: + - name: http + port: 80 + targetPort: 8080 + # protocol: TCP + # nodePort: 30080 + # hostPort: 80 + - name: https + port: 443 + targetPort: 8443 + # protocol: TCP + # nodePort: 30443 + # hostPort: 443 + # TCPMapping_Port + # port: 2222 + # targetPort: 2222 + # protocol: TCP + # nodePort: 30222 + + externalTrafficPolicy: + + sessionAffinity: + + sessionAffinityConfig: + + externalIPs: [] + + annotations: {} + + ############################################################################# + ## Ambassador should be configured using CRD definition. If you want + ## to use annotations, the following is an example of annotating the + ## Ambassador service with global configuration manifest. 
+ ## + ## See https://www.getambassador.io/reference/core/ambassador and + ## https://www.getambassador.io/reference/core/tls for more info + ############################################################################# + # + # getambassador.io/config: | + # --- + # apiVersion: ambassador/v1 + # kind: TLSContext + # name: ambassador + # secret: ambassador-certs + # hosts: ["*"] + # --- + # apiVersion: ambassador/v1 + # kind: Module + # name: ambassador + # config: + # admin_port: 8001 + # diag_port: 8877 + # diagnostics: + # enabled: true + # enable_grpc_http11_bridge: false + # enable_grpc_web: false + # enable_http10: false + # enable_ipv4: true + # enable_ipv6: false + # liveness_probe: + # enabled: true + # lua_scripts: + # readiness_probe: + # enabled: true + # server_name: envoy + # service_port: 8080 + # use_proxy_proto: false + # use_remote_address: true + # xff_num_trusted_hops: 0 + # x_forwarded_proto_redirect: false + # load_balancer: + # policy: round_robin + # circuit_breakers: + # max_connections: 2048 + # retry_policy: + # retry_on: "5xx" + # cors: + + # Manually set the name of the generated Service + nameOverride: + +adminService: + create: true + type: ClusterIP + port: 8877 + snapshotPort: 8005 + # NodePort used if type is NodePort + # nodePort: 38877 + annotations: {} + +rbac: + # Specifies whether RBAC resources should be created + create: true + # List of Pod Security Policies to use on the container. + # If security.podSecurityPolicy is set, it will be appended to the list + podSecurityPolicies: [] + # Name of the RBAC resources defaults to the name of the release. + # Set nameOverride when installing Ambassador with cluster-wide scope in + # different namespaces with the same release name to avoid conflicts. + nameOverride: + +scope: + # tells Ambassador to only use resources in the namespace or namespace set by namespace.name + singleNamespace: false + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +deploymentStrategy: + type: RollingUpdate + +restartPolicy: + +terminationGracePeriodSeconds: + +initContainers: [] + +sidecarContainers: [] + +livenessProbe: + initialDelaySeconds: 30 + periodSeconds: 3 + failureThreshold: 3 + +readinessProbe: + initialDelaySeconds: 30 + periodSeconds: 3 + failureThreshold: 3 + + +volumes: [] + +volumeMounts: [] + +podLabels: {} + +podAnnotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "9102" + +deploymentLabels: {} + +deploymentAnnotations: {} + # configmap.reloader.stakater.com/auto: "true" + +resources: + # Recommended resource requests and limits for Ambassador + limits: + cpu: 1000m + memory: 600Mi + requests: + cpu: 200m + memory: 300Mi + +priorityClassName: '' + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +topologySpreadConstraints: [] + +ambassadorConfig: '' + +crds: + enabled: true + create: true + keep: true + +# Prometheus Operator ServiceMonitor configuration +# See documentation: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor +metrics: + serviceMonitor: + enabled: false + # interval: 30s + # scrapeTimeout: 30s + # selector: {} + +################################################################################ +## Ambassador Edge Stack Configuration ## +################################################################################ + +# The Ambassador Edge Stack is free for limited use without a license key. +# Go to https://{ambassador-host}/edge_stack/admin/#dashboard to register +# for a community license key. + +enableAES: true + +# Set createSecret: false if installing multiple releases of The Ambassador +# Edge Stack in the same namespace. +licenseKey: + value: + createSecret: true + secretName: + # Annotations to attach to the license-key-secret. + annotations: {} + +# The DevPortal is exposed at /docs/ endpoint in the AES container. +# Setting this to true will automatically create routes for the DevPortal. +createDevPortalMappings: true +devportal: + docsPrefix: /documentation/ + +# The Ambassador Edge Stack uses a redis instance for managing authentication, +# rate limiting, and sharing minor configuration details between pods for +# centralized management. These values configure the redis instance that ships +# by default with The Ambassador Edge Stack. +# +# URL of your redis instance. Defaults to redis instance created below. +redisURL: + +# Ambassador ships with a basic redis instance. Configure the deployment with the options below. +redis: + create: true + image: + repository: redis + tag: 5.0.1 + pullPolicy: IfNotPresent + # Annotations for Ambassador Pro's redis instance. + annotations: + deployment: {} + service: {} + resources: {} + # If you want to specify resources, uncomment the following + # lines and remove the curly braces after 'resources:'. + # These are placeholder values and must be tuned. + # limits: + # cpu: 100m + # memory: 256Mi + # requests: + # cpu: 50m + # memory: 128Mi + nodeSelector: {} + affinity: {} + tolerations: {} + + +# Configures the AuthService that ships with the Ambassador Edge Stack. +# Setting authService.create: false will not install the AES AuthService and +# allow you to define your own. +# +# Typically when using the AES, you will want to keep this set to true and use +# the External Filter to communicate with a custom authentication service.
+# https://www.getambassador.io/reference/filter-reference/#filter-type-external +authService: + deploymentExtraName: auth + create: true + # Set additional configuration options. See https://www.getambassador.io/reference/services/auth-service for more information + optional_configurations: {} + # include_body: + # max_bytes: 4096 + # allow_partial: true + # status_on_error: + # code: 403 + # failure_mode_allow: false + # retry_policy: + # retry_on: "5xx" + # num_retries: 2 + # add_linkerd_headers: true + # timeout_ms: 30000 + + +# Configures the RateLimitService in the Ambassador Edge Stack. +# Keep this enabled to configure RateLimits in AES. +rateLimit: + create: true + deploymentExtraName: ratelimit + +# Projects are a beta feature of Ambassador that allow developers to stage and +# deploy code with nothing more than a GitHub repository. +# See: https://www.getambassador.io/docs/edge-stack/latest/topics/using/projects/ +registry: + create: false + +# Resolvers are used to configure the discovery service strategy for Ambassador Edge Stack. +# See: https://www.getambassador.io/docs/edge-stack/latest/topics/running/resolvers/ +resolvers: + endpoint: + create: false + name: endpoint + consul: + create: false + name: consul-dc1 + spec: {} + # Configuration for a Consul Resolver + # address: consul-server.default.svc.cluster.local:8500 + # datacenter: dc1 + +# Create and manage an Ambassador Module from the Helm Chart. See: +# https://www.getambassador.io/docs/edge-stack/latest/topics/running/ambassador +# for more info on the available options. +# +# Note: The Module can only be named ambassador. There can only be one Module +# installed per-namespace. +module: {} + +################################################################################ +## DEPRECATED configuration objects ## +################################################################################ + +# DEPRECATED: Ambassador now exposes the /metrics endpoint in Envoy. +# DEPRECATED: See https://www.getambassador.io/user-guide/monitoring#deployment for more information on how to use the /metrics endpoint +# +# DEPRECATED: Enabling the prometheus exporter creates a sidecar and configures ambassador to use it +prometheusExporter: + enabled: false + repository: prom/statsd-exporter + tag: v0.8.1 + pullPolicy: IfNotPresent + resources: {} + # If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 256Mi + # requests: + # cpu: 50m + # memory: 128Mi + # You can configure the statsd exporter to modify the behavior of mappings and other features. + # See documentation: https://github.com/prometheus/statsd_exporter/tree/v0.8.1#metric-mapping-and-configuration + # Uncomment the following line if you wish to specify a custom configuration: + # configuration: | + # --- + # mappings: + # - match: 'envoy.cluster.*.upstream_cx_connect_ms' + # name: "envoy_cluster_upstream_cx_connect_time" + # timer_type: 'histogram' + # labels: + # cluster_name: "$1" + +# DEPRECATED: Use security.podSecurityContext +# securityContext: +# runAsUser: 8888 + + +# Configures Service Preview that ships with the Ambassador Edge Stack and edgectl.
+# Setting servicePreview.enabled: true will install the Traffic Agent Service Account, Traffic Manager with RBAC, and ambassador-injector +servicePreview: + enabled: false + trafficManager: + image: + # Leave blank to use image.repository and image.tag + repository: + tag: + serviceAccountName: traffic-manager + trafficAgent: + image: + # Leave blank to use image.repository and image.tag + repository: + tag: + singleNamespace: true + serviceAccountName: traffic-agent + port: 9900 + + # Configure the ambassador-injector webhook for Service Preview Traffic Agent automatic sidecar injection. + injector: + enabled: true + + # If no injector.crtPEM and injector.keyPEM are provided, a self-signed certificate will be issued + # for the Common Name (CN) of `..svc`, which is the cluster-internal DNS name + # for the service. + crtPEM: '' + keyPEM: '' + +# Configure the ambassador agent +agent: + enabled: true + # this will be empty when it first gets applied, then the user will edit the agent to + # make it start reporting + cloudConnectToken: '' + rpcAddress: https://app.getambassador.io/ + createArgoRBAC: true + image: + # Leave blank to use image.repository and image.tag + tag: + repository: + +deploymentTool: '' + +# configure docker to pull from private registry +docker: {} +createNamespace: false +enableTestService: false diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/.helmignore b/charts/artifactory-ha/artifactory-ha/3.0.1400/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/CHANGELOG.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/CHANGELOG.md new file mode 100644 index 000000000..788bf661c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/CHANGELOG.md @@ -0,0 +1,830 @@ +# JFrog Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file. + +## [3.0.14] - Jul 31, 2020 +* Update the README section on Nginx SSL termination to reflect the actual YAML structure. + +## [3.0.13] - Jul 30, 2020 +* Added condition to disable the migration scripts. + +## [3.0.12] - Jul 29, 2020 +* Document Artifactory node affinity. + +## [3.0.11] - Jul 28, 2020 +* Added maxConnections for persistent storage type aws-s3-v3. + +## [3.0.10] - Jul 28, 2020 +Bugfix / support for userPluginSecrets with Artifactory 7 + +## [3.0.9] - Jul 27, 2020 +* Add tpl to external database secrets. +* Modified `scheme` to `artifactory-ha.scheme` + +## [3.0.8] - Jul 23, 2020 +* Added condition to disable the migration init container. + +## [3.0.7] - Jul 21, 2020 +* Updated Artifactory-ha Chart to add node and primary labels to pods and service objects. 
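The `[3.0.7]` entry above propagates the chart's label values onto pods and services; as a minimal sketch, the values involved are the key paths named in the `[0.17.2]` entry further down (`artifactory.primary.labels`, `artifactory.node.labels`, `nginx.labels`), with an illustrative label:

```yaml
# Sketch only: custom labels applied to the primary, member and nginx objects.
# Key paths come from the [0.17.2] entry below; the label itself is an example.
artifactory:
  primary:
    labels:
      team: platform
  node:
    labels:
      team: platform
nginx:
  labels:
    team: platform
```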
+ +## [3.0.6] - Jul 20, 2020 +* Support custom CA and certificates + +## [3.0.5] - Jul 13, 2020 +* Updated Artifactory version to 7.6.3 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.3 +* Fixed Mysql database jar path in `preStartCommand` in README + +## [3.0.4] - Jul 8, 2020 +* Move some postgresql values to where they should be according to the subchart + +## [3.0.3] - Jul 8, 2020 +* Set Artifactory access client connections to the same value as the access threads. + +## [3.0.2] - Jul 6, 2020 +* Updated Artifactory version to 7.6.2 +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [3.0.1] - Jul 01, 2020 +* Add dedicated ingress object for Replicator service when enabled + +## [3.0.0] - Jun 30, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update busybox tag version to `1.31.1` +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [2.6.0] - Jun 29, 2020 +* Updated Artifactory version to 7.6.1 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.1 +* Add tpl for external database secrets + +## [2.5.8] - Jun 25, 2020 +* Stop loading the Nginx stream module because it is now a core module + +## [2.5.7] - Jun 18, 2020 +* Fixes bootstrap configMap issue on member node + +## [2.5.6] - Jun 11, 2020 +* Support list of custom secrets + +## [2.5.5] - Jun 11, 2020 +* NOTES.txt fixed incorrect information + +## [2.5.4] - Jun 12, 2020 +* Updated Artifactory version to 7.5.7 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5.7 + +## [2.5.3] - Jun 8, 2020 +* Statically setting primary service type to ClusterIP. +* Prevents primary service from being exposed publicly when using LoadBalancer type on cloud providers. + +## [2.5.2] - Jun 8, 2020 +* Readme update - configuring Artifactory with oracledb + +## [2.5.1] - Jun 5, 2020 +* Fixes broken PDB issue upgrading from 6.x to 7.x + +## [2.5.0] - Jun 1, 2020 +* Updated Artifactory version to 7.5.5 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5 +* Fixes bootstrap configMap permission issue +* Update postgresql tag version to `9.6.18-debian-10-r7` + +## [2.4.10] - May 27, 2020 +* Added Tomcat maxThreads & acceptCount + +## [2.4.9] - May 25, 2020 +* Fixed postgresql README `image` Parameters + +## [2.4.8] - May 24, 2020 +* Fixed typo in README regarding migration timeout + +## [2.4.7] - May 19, 2020 +* Added metadata maxOpenConnections + +## [2.4.6] - May 07, 2020 +* Fix `installerInfo` string format + +## [2.4.5] - Apr 27, 2020 +* Updated Artifactory version to 7.4.3 + +## [2.4.4] - Apr 27, 2020 +* Change customInitContainers order to run before the "migration-ha-artifactory" initContainer + +## [2.4.3] - Apr 24, 2020 +* Fix `artifactory.persistence.awsS3V3.useInstanceCredentials` incorrect conditional logic +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [2.4.2] - Apr 16, 2020 +* Custom volume mounts in migration init container. 
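For the `[3.0.0]` upgrade note above, the two values it asks you to pass (only relevant when running the bundled PostgreSQL, `postgresql.enabled=true`) can also be kept in an override file instead of `--set` flags; a hedged sketch, assuming the Bitnami-style `image.tag` nesting:

```yaml
# upgrade-values.yaml (sketch) -- only needed when upgrading with postgresql.enabled=true.
# The two values are copied verbatim from the [3.0.0] note above.
databaseUpgradeReady: true
postgresql:
  image:
    tag: 9.6.18-debian-10-r7
```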
+ +## [2.4.1] - Apr 16, 2020 +* Fix broken support for gcpServiceAccount for googleStorage + +## [2.4.0] - Apr 14, 2020 +* Updated Artifactory version to 7.4.1 + +## [2.3.1] - April 13, 2020 +* Update README with helm v3 commands + +## [2.3.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml +* Bump postgresql tag version to `9.6.17-debian-10-r21` in values.yaml + +## [2.2.11] - Apr 8, 2020 +* Added recommended ingress annotation to avoid 413 errors + +## [2.2.10] - Apr 8, 2020 +* Moved migration scripts under `files` directory +* Support preStartCommand in migration Init container as `artifactory.migration.preStartCommand` + +## [2.2.9] - Apr 01, 2020 +* Support masterKey and joinKey as secrets + +## [2.2.8] - Apr 01, 2020 +* Ensure that the join key is also copied when provided by an external secret +* Migration container in primary and node statefulset now respects custom versions and the specified node/primary resources + +## [2.2.7] - Apr 01, 2020 +* Added cache-layer in chain definition of Google Cloud Storage template +* Fix readme use to `-hex 32` instead of `-hex 16` + +## [2.2.6] - Mar 31, 2020 +* Change the way the artifactory `command:` is set so it will properly pass a SIGTERM to java + +## [2.2.5] - Mar 31, 2020 +* Removed duplicate `artifactory-license` volume from primary node + +## [2.2.4] - Mar 31, 2020 +* Restore `artifactory-license` volume for the primary node + +## [2.2.3] - Mar 29, 2020 +* Add Nginx log options: stderr as logfile and log level + +## [2.2.2] - Mar 30, 2020 +* Apply initContainers.resources to `copy-system-yaml`, `prepare-custom-persistent-volume`, and `migration-artifactory-ha` containers +* Use the same defaulting mechanism used for the artifactory version used elsewhere in the chart +* Removed duplicate `artifactory-license` volume that prevented using an external secret + +## [2.2.1] - Mar 29, 2020 +* Fix loggers sidecars configurations to support new file system layout and new log names + +## [2.2.0] - Mar 29, 2020 +* Fix broken admin user bootstrap configuration +* **Breaking change:** renamed `artifactory.accessAdmin` to `artifactory.admin` + +## [2.1.3] - Mar 24, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [2.1.2] - Mar 21, 2020 +* Support for SSL offload in Nginx service(LoadBalancer) layer. Introduced `nginx.service.ssloffload` field with boolean type. + +## [2.1.1] - Mar 23, 2020 +* Moved installer info to values.yaml so it is fully customizable + +## [2.1.0] - Mar 23, 2020 +* Updated Artifactory version to 7.3.2 + +## [2.0.36] - Mar 20, 2020 +* Add support GCP credentials.json authentication + +## [2.0.35] - Mar 20, 2020 +* Add support for masterKey trim during 6.x to 7.x migration if 6.x masterKey is 32 hex (64 characters) + +## [2.0.34] - Mar 19, 2020 +* Add support for NFS directories `haBackupDir` and `haDataDir` + +## [2.0.33] - Mar 18, 2020 +* Increased Nginx proxy_buffers size + +## [2.0.32] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files +* useInstanceCredentials variable was declared in S3 settings but not used in chart. Now it is being used. 
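Several entries here ([3.0.11], [2.4.3], [2.0.32]) touch the `aws-s3-v3` filestore; a rough sketch of how those keys sit together under `artifactory.persistence` (only the keys named in this changelog are shown, other provider settings such as the bucket name are omitted):

```yaml
# Sketch only: aws-s3-v3 filestore using the pod/node IAM role instead of static keys.
# Key names follow the [2.4.3] and [3.0.11] entries; tune maxConnections for your workload.
artifactory:
  persistence:
    type: aws-s3-v3
    awsS3V3:
      useInstanceCredentials: true
      maxConnections: 50
```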
+ +## [2.0.31] - Mar 17, 2020 +* Fix rendering of Service Account annotations + +## [2.0.30] - Mar 16, 2020 +* Add Unsupported message from 6.18 to 7.2.x (migration) + +## [2.0.29] - Mar 11, 2020 +* Upgrade Docs update + +## [2.0.28] - Mar 11, 2020 +* Unified charts public release + +## [2.0.27] - Mar 8, 2020 +* Add an optional wait for primary node to be ready with a proper test for http status + +## [2.0.23] - Mar 6, 2020 +* Fix path to `/artifactory_bootstrap` +* Add support for controlling the name of the ingress and allow to set more than one cname + +## [2.0.22] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [2.0.21] - Feb 28, 2020 +* Add support to process `valueFrom` for extraEnvironmentVariables + +## [2.0.20] - Feb 26, 2020 +* Store join key to secret + +## [2.0.19] - Feb 26, 2020 +* Updated Artifactory version to 7.2.1 + +## [2.0.12] - Feb 07, 2020 +* Remove protection flag `databaseUpgradeReady` which was added to check internal postgres upgrade + +## [2.0.0] - Feb 07, 2020 +* Updated Artifactory version to 7.0.0 + +## [1.4.10] - Feb 13, 2020 +* Add support for SSH authentication to Artifactory + +## [1.4.9] - Feb 10, 2020 +* Fix custom DB password indention + +## [1.4.8] - Feb 9, 2020 +* Add support for `tpl` in the `postStartCommand` + +## [1.4.7] - Feb 4, 2020 +* Support customisable Nginx kind + +## [1.4.6] - Feb 2, 2020 +* Add a comment stating that it is recommended to use an external PostgreSQL with a static password for production installations + +## [1.4.5] - Feb 2, 2020 +* Add support for primary or member node specific preStartCommand + +## [1.4.4] - Jan 30, 2020 +* Add the option to configure resources for the logger containers + +## [1.4.3] - Jan 26, 2020 +* Improve `database.user` and `database.password` logic in order to support more use cases and make the configuration less repetitive + +## [1.4.2] - Jan 22, 2020 +* Refined pod disruption budgets to separate nginx and Artifactory pods + +## [1.4.1] - Jan 19, 2020 +* Fix replicator port config in nginx replicator configmap + +## [1.4.0] - Jan 19, 2020 +* Updated Artifactory version to 6.17.0 + +## [1.3.8] - Jan 16, 2020 +* Added example for external nginx-ingress + +## [1.3.7] - Jan 07, 2020 +* Add support for customizable `mountOptions` of NFS PVs + +## [1.3.6] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [1.3.5] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [1.3.4] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.3] - Dec 16, 2019 +* Another fix for toggling nginx service ports + +## [1.3.2] - Dec 12, 2019 +* Fix for toggling nginx service ports + +## [1.3.1] - Dec 10, 2019 +* Add support for toggling nginx service ports + +## [1.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [1.2.4] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [1.2.3] - Nov 27, 2019 +* Add support for PriorityClass + +## [1.2.2] - Nov 20, 2019 +* Update Artifactory logo + +## [1.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [1.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [1.1.12] - Nov 17, 2019 +* Fix `README.md` format (broken table) + +## [1.1.11] - Nov 17, 2019 +* Update comment on Artifactory master key + +## [1.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## 
[1.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [1.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [1.1.7] - Nov 11, 2019 +* Additional documentation for masterKey + +## [1.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [1.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [1.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [1.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [1.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [1.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [1.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [1.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [1.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [0.17.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [0.17.2] - Oct 21, 2019 +* Add support for setting `artifactory.primary.labels` +* Add support for setting `artifactory.node.labels` +* Add support for setting `nginx.labels` + +## [0.17.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [0.17.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [0.16.7] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [0.16.6] - Sep 24, 2019 +* Add support for setting `nginx.service.labels` + +## [0.16.5] - Sep 23, 2019 +* Add support for setting `artifactory.customInitContainersBegin` + +## [0.16.4] - Sep 20, 2019 +* Add support for setting `initContainers.resources` + +## [0.16.3] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [0.16.2] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [0.16.1] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [0.16.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [0.15.15] - Aug 18, 2019 +* Fix existingSharedClaim permissions issue and example + +## [0.15.14] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [0.15.13] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [0.15.12] - Aug 6, 2019 +* Do not mount `access/etc/bootstrap.creds` unless the user specifies a custom password or secret (Access already generates a random password if one is not provided) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [0.15.11] - Aug 5, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [0.15.10] - Aug 5, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [0.15.9] - Aug 1, 2019 +* Fix masterkey/masterKeySecretName not specified warning render logic in NOTES.txt + +## [0.15.8] - Jul 28, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [0.15.7] - Jul 25, 2019 +* Updated README about how to apply Artifactory licenses + +## [0.15.6] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [0.15.5] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [0.15.4] - Jul 11, 2019 +* Add `artifactory.customVolumeMounts` support to member node statefulset template + +## [0.15.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [0.15.2] - Jul 3, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [0.15.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [0.15.0] - Jun 27, 2019 +* Updated Artifactory version to 6.11.0 and Restart Primary node when bootstrap.creds file has been modified in artifactory-ha + +## [0.14.4] - Jun 24, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [0.14.3] - Jun 24, 2019 +* Update chart maintainers + +## [0.14.2] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [0.14.1] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [0.14.0] - Jun 20, 2019 +* Use ConfigMaps for nginx configuration and remove nginx postStart command + +## [0.13.10] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [0.13.9] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [0.13.8] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [0.13.7] - Jun 6, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [0.13.6] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [0.13.5] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [0.13.4] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [0.13.3] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [0.13.2] - May 19, 2019 +* Fix missing logger image tag + +## [0.13.1] - May 15, 2019 +* Support `artifactory.persistence.cacheProviderDir` for on-premise cluster + +## [0.13.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [0.12.23] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [0.12.22] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [0.12.21] - Apr 30, 2019 +* Add support for JMX monitoring + +## [0.12.20] - Apr29, 2019 +* Added support for headless services + +## [0.12.19] - Apr 28, 2019 +* Added support for `cacheProviderDir` + +## [0.12.18] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [0.12.17] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [0.12.16] - Apr 12, 2019 +* Added support for `customVolumeMounts` + +## [0.12.15] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [0.12.14] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [0.12.13] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a 
parameter + +## [0.12.12] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [0.12.11] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [0.12.10] - Aprl 07, 2019 +* Change network policy API group + +## [0.12.9] - Aprl 04, 2019 +* Apply the existing PVC for members (in addition to primary) + +## [0.12.8] - Aprl 03, 2019 +* Bugfix for userPluginSecrets + +## [0.12.7] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [0.12.6] - Aprl 03, 2019 +* Added installer info + +## [0.12.5] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [0.12.4] - Apr 02, 2019 +* Fix issue #253 (use existing PVC for data and backup storage) + +## [0.12.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [0.12.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [0.12.1] - Mar 26, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [0.12.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [0.11.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [0.11.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [0.11.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [0.11.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [0.11.14] - Mar 21, 2019 +* Make ingress path configurable + +## [0.11.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart for Primary + +## [0.11.12] - Mar 19, 2019 +* Fix existingClaim example + +## [0.11.11] - Mar 18, 2019 +* Disable the option to use nginx PVC with more than one replica + +## [0.11.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [0.11.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [0.11.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [0.11.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 + +## [0.11.6] - Mar 13, 2019 +* Move securityContext to container level + +## [0.11.5] - Mar 11, 2019 +* Add the option to use existing volume claims for Artifactory storage + +## [0.11.4] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [0.11.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [0.11.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [0.11.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [0.11.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [0.10.3] - Feb 21, 2019 +* Add s3AwsVersion option to awsS3 configuration for use with IAM roles + +## [0.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [0.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [0.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [0.9.7] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [0.9.6] - Feb 7, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [0.9.5] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.4] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.3] - Feb 5, 2019 +* Remove the inactive server 
remove plugin + +## [0.9.2] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [0.9.1] - Jan 27, 2019 +* Fix support for Azure Blob Storage Binary provider + +## [0.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [0.8.10] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [0.8.9] - Jan 18, 2019 +* Added support for `ingress.labels` values + +## [0.8.8] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [0.8.7] - Jan 15, 2019 +* Add support for Azure Blob Storage Binary provider + +## [0.8.6] - Jan 13, 2019 +* Fix documentation about nginx group id + +## [0.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [0.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [0.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [0.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [0.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [0.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [0.7.17] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [0.7.16] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [0.7.15] - Dec 9, 2018 +* AWS S3 add `roleName` for using IAM role + +## [0.7.14] - Dec 6, 2018 +* AWS S3 `identity` and `credential` are now added only if they have a value, to allow using an IAM role + +## [0.7.13] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [0.7.12] - Dec 2, 2018 +* Remove Java option "-Dartifactory.locking.provider.type=db". This is already the default setting.
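The `[0.8.3]` entry above introduces `artifactory.extraEnvironmentVariables`; a hedged sketch of what such a values block can look like, assuming the usual Kubernetes `name`/`value` list shape (the variables themselves are only examples):

```yaml
# Sketch only: extra environment variables passed through to the Artifactory containers.
# The key comes from the [0.8.3] entry above; both variables are illustrative.
artifactory:
  extraEnvironmentVariables:
    - name: EXTRA_JAVA_OPTIONS
      value: "-Xms2g -Xmx4g"
    - name: MY_CUSTOM_FLAG
      value: "true"
```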
+ +## [0.7.11] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [0.7.10] - Nov 29, 2018 +* Fixed the volumeMount for the replicator.yaml + +## [0.7.9] - Nov 29, 2018 +* Optionally include primary node into poddisruptionbudget + +## [0.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [0.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [0.7.6] - Nov 18, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [0.7.5] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [0.7.4] - Nov 13, 2018 +* Allow pod anti-affinity settings to include primary node + +## [0.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [0.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [0.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [0.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.9] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [0.6.8] - Oct 22, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [0.6.7] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [0.6.6] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [0.6.5] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow `soft` or `hard` specification of member node anti-affinity +* Allow providing pre-existing secrets containing external database credentials +* Fix `s3` binary store provider to properly use the `cache-fs` provider +* Allow arbitrary properties when using the `s3` binary store provider + +## [0.6.4] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 14, 2018 +* Make S3 endpoint configurable (was hardcoded with `s3.amazonaws.com`) + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [0.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [0.5.3] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [0.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [0.4.7] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [0.4.6] - Sep 25, 2018 +* Add PodDisruptionBudget for member nodes, defaulting to minAvailable of 1 + +## [0.4.4] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 + +## [0.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [0.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/Chart.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/Chart.yaml new file mode 100644 index 000000000..6422c9841 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + catalog.cattle.io/certified: 
partner + catalog.cattle.io/release-name: artifactory-ha +apiVersion: v1 +appVersion: 7.6.3 +description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: amithk@jfrog.com + name: amithins +- email: daniele@jfrog.com + name: danielezer +- email: eldada@jfrog.com + name: eldada +- email: ramc@jfrog.com + name: chukka +- email: rimasm@jfrog.com + name: rimusz +name: artifactory-ha +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 3.0.1400 diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/LICENSE b/charts/artifactory-ha/artifactory-ha/3.0.1400/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/README.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/README.md new file mode 100644 index 000000000..f9ad3619d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/README.md @@ -0,0 +1,1587 @@ +# JFrog Artifactory High Availability Helm Chart + +## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database **NOTE:** For production grade installations it is recommended to use an external PostgreSQL +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. 
+> This can be controlled by the parameter `artifactory.service.pool`. + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client: + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +**NOTE:** Passing `masterKey` is mandatory for a fresh install of the chart (7.x appVersion) + +### Create a unique Master Key +An Artifactory HA cluster requires a unique master key. + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: + +```bash +helm upgrade --install artifactory-ha --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```yaml +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resource requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions output by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with: +```bash +helm upgrade artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +If Artifactory was installed without providing a value to `postgresql.postgresqlPassword` (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n <namespace> <release-name>-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +An Artifactory 6.x to 7.x upgrade requires a one-time migration process. This is done automatically on pod startup if needed. +It's possible to configure the migration timeout with the following configuration in extreme cases. The provided default should be more than enough for completion of the migration.
+```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 +``` + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. + +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). + +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Artifactory storage +Artifactory HA support a wide range of storage back ends. 
You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration). + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter. +This is used to set how many replicas of a binary should be stored in the cluster's nodes. +Once this value is set on initial deployment, you cannot update it using helm. +It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number. + +#### Existing volume claim + +###### Primary node +In order to use an existing volume claim for the Artifactory primary storage, you need to: +- Create a persistent volume claim by the name `volume-<release-name>-artifactory-ha-primary-0`, e.g. `volume-myrelease-artifactory-ha-primary-0` +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.primary.persistence.existingClaim=true +``` + +###### Member nodes +In order to use existing volume claims for the Artifactory member nodes' storage, you need to: +- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount` by the names `volume-<release-name>-artifactory-ha-member-<ordinal>`, e.g. `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-member-1`. +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.node.persistence.existingClaim=true +``` + +#### Existing shared volume claim + +In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to: + +- Create PVCs with ReadWriteMany that match the naming conventions: +``` + {{ template "artifactory-ha.fullname" . }}-data-pvc-<ordinal> + {{ template "artifactory-ha.fullname" . }}-backup-pvc-<ordinal> +``` +An example that shows 2 existing claims to be used: +``` + myexample-artifactory-ha-data-pvc-0 + myexample-artifactory-ha-backup-pvc-0 + myexample-artifactory-ha-data-pvc-1 + myexample-artifactory-ha-backup-pvc-1 +``` +- Set `artifactory.persistence.fileSystem.existingSharedClaim.enabled` to true (in values.yaml or via `--set`): +``` +--set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true +--set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2 +``` + +#### NFS +To use an NFS server as your cluster's storage, you need to: +- Set up an NFS server and get its IP as `NFS_IP` +- Create `data` and `backup` directories on the NFS exported directory, with write permissions for all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore, see [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider). +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +...
+--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +In order to use a GCP service account, Artifactory needs a gcp.credentials.json file in the same directory as the binarystore.xml file. +This can be generated by running: +```bash +gcloud iam service-accounts keys create <key-file-name> --iam-account <service-account-email> +``` +This will produce a JSON key like the following, which can be saved to a file or copied into your `values.yaml`: +```json +{ + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." +} +``` + +One option is to create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` in a custom `values.yaml`: +```bash +# Create the Kubernetes secret from the file you created earlier. +# IMPORTANT: The file must be called "gcp.credentials.json" because this is used later as the secret key! +kubectl create secret generic artifactory-gcp-creds --from-file=./gcp.credentials.json +``` +Set this secret in your custom `values.yaml`: +```yaml +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + customSecretName: artifactory-gcp-creds +``` + +Another option is to put your generated config directly in your custom `values.yaml` and a secret will be created from it: +```yaml +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + config: | + { + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." + } +``` + +#### AWS S3 +**NOTE:** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section. + +To use an AWS S3 bucket as the cluster's filestore, see [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider). +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +...
+# With using existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match. See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, See [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine/s that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Create a unique Master Key + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.masterKeySecretName=my-secret --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory-ha --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. 
The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.joinKeySecretName=my-secret --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged.. + +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. Switching between them while a cluster is running might disable your Artifactory HA cluster! + +##### Artifactory UI +Once primary cluster is running, open Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license is separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node. + +##### REST API +You can add licenses via REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses. + +##### Kubernetes Secret +You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**! +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-cluster-license --from-file=./art.lic + +# Pass the license to helm +helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + + + + + + + +``` + +```bash +helm upgrade --install artifactory-ha -f values.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. 
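+ +For reference, a license update via the REST API could look like the sketch below. This is only an illustration: the `<artifactory-url>`, credentials and license value are placeholders you must replace, and the exact endpoint and payload format should be taken from the REST API documentation linked above. +```bash +# Illustrative only - replace <artifactory-url>, the credentials and the license key value. +# The REST API expects "\n" for newlines inside the license string. +curl -u admin:${ADMIN_PASSWORD} \ + -X POST "http://<artifactory-url>/artifactory/api/system/licenses" \ + -H "Content-Type: application/json" \ + -d '[{"licenseKey": "<license string with \n for newlines>"}]' +``` +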
+If you want to keep managing the Artifactory license using the same method, you can use the `copyOnEveryStartup` example shown in the values.yaml file. + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. The binarystore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup, +which means that changes you make over time to the `binaryStoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f values.yaml +``` + +2. Any custom configuration file you use to configure Artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied, which allows everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory primary and member pods. + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgresql + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory-ha +``` + +### Artifactory JMX Configuration +You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +This will enable access to Artifactory with JMX on the default port (9010).
+You have the option to change the port by setting ```artifactory.primary.javaOpts.jmx.port``` and ```artifactory.node.javaOpts.jmx.port``` +to your choice of port. + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow these steps: +1. Enable JMX as described above and change the Artifactory service to be of type LoadBalancer: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with +```artifactory.primary.javaOpts.jmx.host``` and ```artifactory.node.javaOpts.jmx.host```), so in order to connect to Artifactory +with jconsole you should map the Artifactory Kubernetes service IP to the service name using your hosts file as such: +``` +<artifactory-primary-service-ip> <release-name>-artifactory-ha-primary +<artifactory-node-service-ip> <release-name>-artifactory-ha +``` +3. Launch jconsole with the service address and port: +```bash +jconsole <release-name>-artifactory-ha-primary:<primary-jmx-port> +jconsole <release-name>-artifactory-ha:<node-jmx-port> +``` + +### Bootstrapping Artifactory admin password +You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide. + +1. Create `admin-creds-values.yaml` and provide the IP (by default 127.0.0.1) and password: +```yaml +artifactory: + admin: + ip: "" # Example: "*" to allow access from anywhere + username: "admin" + password: "" +``` + +2. Apply the `admin-creds-values.yaml` file: +```bash +helm upgrade --install artifactory --namespace artifactory-ha center/jfrog/artifactory-ha -f admin-creds-values.yaml +``` + +### Bootstrapping Artifactory configuration +**IMPORTANT:** Bootstrapping Artifactory requires a license. Pass the license as shown in the section above. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. Pass the configMap to helm: +```bash +helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Use custom nginx.conf with Nginx + +Steps to create a configMap with `nginx.conf`: +* Create `nginx.conf` file.
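+A minimal illustrative `nginx.conf` is sketched below. It is not the chart's bundled default (which is the recommended starting point), and the proxied service name and port are assumptions that must match your release: +``` +# Illustrative sketch only - the proxied service name and port are assumptions. +worker_processes 1; +events { + worker_connections 1024; +} +http { + server { + listen 80; + location / { + proxy_pass http://<release-name>-artifactory-ha:8082; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } +} +``` +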
```bash +kubectl create configmap nginx-config --from-file=nginx.conf +``` +* Pass the configMap to `helm install` +```bash +helm upgrade --install artifactory-ha --set nginx.customConfigMap=nginx-config --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Scaling your Artifactory cluster +A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and, if needed, resize it. + +##### Before scaling +**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto-generated one (this is the default with the enclosed PostgreSQL helm chart). + +Get the current database password: +```bash +export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster! + +##### Scale up +Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes). +```bash +# Scale to 4 nodes (1 primary and 3 member nodes) +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +##### Scale down +Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes. + +```bash +# Scale down to 2 member nodes +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha center/jfrog/artifactory-ha +``` +- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. You need to explicitly remove it: +```bash +# List PVCs +kubectl get pvc + +# Remove the PVC with the highest ordinal! +# In this example, the highest node ordinal was 2, so we need to remove its storage. +kubectl delete pvc volume-artifactory-node-2 +``` + +### Use an external Database + +**For production grade installations it is recommended to use an external PostgreSQL with a static password** + +#### PostgreSQL +There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name. + +This can be done with the following parameters: +```bash +... +--set postgresql.enabled=false \ +--set database.type=postgresql \ +--set database.driver=org.postgresql.Driver \ +--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! + +#### Other DB type +There are cases where you will want to use a different database and not the enclosed **PostgreSQL**. +See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database) +> The official Artifactory Docker images include the PostgreSQL database driver.
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib + +This can be done with the following parameters: +```bash +# Make sure your Artifactory Docker image has the MySQL database driver in it +... +--set postgresql.enabled=false \ +--set artifactory.preStartCommand="mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! +##### Configuring Artifactory with external Oracle database +To use Artifactory with an Oracle database, the required Instant Client library files and libaio have to be copied to Artifactory's tomcat lib directory, and the `LD_LIBRARY_PATH` environment variable has to be set. +1. Create a values file with the configuration: +```yaml +postgresql: + enabled: false +database: + type: oracle + driver: oracle.jdbc.OracleDriver + url: + user: + password: +artifactory: + preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O instantclient-basic-linux.x64-19.6.0.0.0dbru.zip https://download.oracle.com/otn_software/linux/instantclient/19600/instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && unzip -jn instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && wget -O libaio1_0.3.110-3_amd64.deb http://ftp.br.debian.org/debian/pool/main/liba/libaio/libaio1_0.3.110-3_amd64.deb && dpkg-deb -x libaio1_0.3.110-3_amd64.deb . && cp lib/x86_64-linux-gnu/* ." + extraEnvironmentVariables: + - name: LD_LIBRARY_PATH + value: /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib +``` +2. Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory-ha --namespace artifactory -f values-oracle.yaml +``` +**NOTE:** If it's an upgrade from 6.x to 7.x, add the same `preStartCommand` under `artifactory.migration.preStartCommand` + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +... +``` + +### Deleting Artifactory +To delete the Artifactory HA cluster: + +On helm v2: +```bash +helm delete --purge artifactory-ha +``` + +On helm v3: +```bash +helm delete artifactory-ha --namespace artifactory-ha +``` + +This will completely delete your Artifactory HA cluster. +**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes.
You need to explicitly remove them: +```bash +kubectl delete pvc -l release=artifactory-ha +``` +See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/) + +### Custom Docker registry for your images +If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a +[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm: +```bash +# Create a Docker registry secret called 'regsecret' +kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL} +``` +Once created, you pass it to `helm`: +```bash +helm upgrade --install artifactory-ha --set imagePullSecrets=regsecret --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Logger sidecars +This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml) + +Get the list of containers in the pod: +```bash +kubectl get pods -n <namespace> <pod-name> -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n' +``` + +View a specific log: +```bash +kubectl logs -n <namespace> <pod-name> -c <log-container-name> +``` + + +### Custom init containers +There are cases where a special, unsupported init process is needed, like checking something on the file system or testing something before spinning up the main container. + +For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + ## Add custom init containers executed before predefined init containers + customInitContainersBegin: | + ## Init containers template goes here ## + ## Add custom init containers executed after predefined init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed. For example, monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template: +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Custom secrets +If you need to add a custom secret in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom secrets in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + # Add custom secrets - secret per file + customSecrets: + - name: custom-secret + key: custom-secret.yaml + data: > + secret data +``` + +To use a custom secret, you need to define a custom volume.
+```yaml +artifactory: + ## Add custom volumes + customVolumes: | + - name: custom-secret + secret: + secretName: custom-secret +``` + +To use a volume, need to define a volume mount as part of a custom init or sidecar container. +```yaml +artifactory: + customSidecarContainers: + - name: side-car-container + volumeMounts: + - name: custom-secret + mountPath: /opt/custom-secret.yaml + subPath: custom-secret.yaml + readOnly: true +``` + +### Add Artifactory User Plugin during installation +If you need to add [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. + +Create a secret with [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) by following command: +```bash +# Secret with single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha + +# Secret with single user plugin with configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha +``` + +Add plugin secret names to `plugins.yaml` as following: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory-ha -f plugins.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory-ha: # Name of the artifactory-ha dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +For additional information, please refer [here](https://www.jfrog.com/confluence/display/JFROG/User+Plugins). + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option. 
+ +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f configmaps.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +This will, in turn: +* Create a configMap with the files you specified above +* Create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + +### Establishing TLS and Adding certificates +In HTTPS, the communication protocol is encrypted using Transport Layer Security (TLS). By default, TLS between JFrog Platform nodes is disabled. +When TLS is enabled, JFrog Access acts as the Certificate Authority (CA) and signs the TLS certificates used by all the different JFrog Platform nodes. + +To establish TLS between JFrog Platform nodes: +Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. For more info, please refer [here](https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates) + +To enable TLS in the chart, set `tls` to true under `access` in [values.yaml](values.yaml). By default it's false: +```yaml +access: + accessConfig: + security: + tls: true +``` + +To add custom TLS certificates, create a TLS secret from the certificate files. + +```bash +kubectl create secret tls <secret-name> --cert=ca.crt --key=ca.private.key +``` + +For resetting access certificates, you can set `resetAccessCAKeys` to true under the `access` section in [values.yaml](values.yaml) and perform a helm upgrade. +* Note: Once the helm upgrade is done, set `resetAccessCAKeys` to false for subsequent upgrades (to avoid resetting access certificates on every helm upgrade) +```yaml +access: + accessConfig: + security: + tls: true + customCertificatesSecretName: + resetAccessCAKeys: true +``` + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. + +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f filebeat.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml`. + +### Install Artifactory HA with Nginx and Terminate SSL in Nginx Service (LoadBalancer).
+To install the helm chart and perform SSL offload at the LoadBalancer layer of Nginx (for example, using AWS ACM certificates to do the SSL offload in the load balancer layer), add the following to an `artifactory-ssl-values.yaml` file:
+```yaml
+nginx:
+  https:
+    enabled: false
+  service:
+    ssloffload: true
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx"
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm upgrade --install artifactory -f artifactory-ssl-values.yaml --namespace artifactory center/jfrog/artifactory
+```
+
+### Ingress and TLS
+To get Helm to create an ingress object with a hostname, add the lines below to an `artifactory-ingress-values.yaml` file:
+```yaml
+ingress:
+  enabled: true
+  hosts:
+    - artifactory.company.com
+artifactory:
+  service:
+    type: NodePort
+nginx:
+  enabled: false
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm upgrade --install artifactory -f artifactory-ingress-values.yaml --namespace artifactory center/jfrog/artifactory
+```
+
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
+
+```bash
+kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key
+```
+
+Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file:
+
+```yaml
+ingress:
+  ## If true, Artifactory Ingress will be created
+  ##
+  enabled: true
+
+  ## Artifactory Ingress hostnames
+  ## Must be provided if Ingress is enabled
+  ##
+  hosts:
+    - artifactory.domain.com
+  annotations:
+    kubernetes.io/tls-acme: "true"
+  ## Artifactory Ingress TLS configuration
+  ## Secrets must be manually created in the namespace
+  ##
+  tls:
+    - secretName: artifactory-tls
+      hosts:
+        - artifactory.domain.com
+```
+
+### Ingress annotations
+
+This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup.
+
+```yaml
+ingress:
+  enabled: true
+  defaultBackend:
+    enabled: false
+  hosts:
+    - myhost.example.com
+  annotations:
+    ingress.kubernetes.io/force-ssl-redirect: "true"
+    ingress.kubernetes.io/proxy-body-size: "0"
+    ingress.kubernetes.io/proxy-read-timeout: "600"
+    ingress.kubernetes.io/proxy-send-timeout: "600"
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token;
+      rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3;
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+  tls:
+    - hosts:
+        - "myhost.example.com"
+```
+
+### Ingress additional rules
+
+You have the option to add additional ingress rules to the Artifactory ingress. An example of this use case is routing the `/xray` path to Xray.
+In order to do that, simply add the following to an `artifactory-ha-values.yaml` file:
+```yaml
+ingress:
+  enabled: true
+
+  defaultBackend:
+    enabled: false
+
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite "(?i)/xray(/|$)(.*)" /$2 break;
+
+  additionalRules: |
+    - host:
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName:
+              servicePort:
+          - path: /xray
+            backend:
+              serviceName:
+              servicePort:
+          - path: /artifactory
+            backend:
+              serviceName: {{ template "artifactory.nginx.fullname" . }}
+              servicePort: {{ .Values.nginx.externalPortHttp }}
+```
+
+and running:
+```bash
+helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f artifactory-ha-values.yaml
+```
+
+### Dedicated Ingress object for replicator service
+
+You have the option to add an additional ingress object for the Replicator service. An example of this use case is routing the `/replicator/` path to Artifactory.
+In order to do that, simply add the following to an `artifactory-values.yaml` file:
+
+```yaml
+artifactory:
+  replicator:
+    enabled: true
+    ingress:
+      name:
+      hosts:
+        - myhost.example.com
+      annotations:
+        kubernetes.io/ingress.class: nginx
+        nginx.ingress.kubernetes.io/proxy-buffering: "off"
+        nginx.ingress.kubernetes.io/configuration-snippet: |
+          chunked_transfer_encoding on;
+      tls:
+        - hosts:
+            - "myhost.example.com"
+          secretName:
+```
+
+### Ingress behind another load balancer
+If you are running a load balancer that is used to offload the TLS in front of the Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable the **`use-forwarded-headers=true`** option. Otherwise, nginx will fill those headers with the request information it receives from the external load balancer.
+
+To enable it with `helm install`:
+```bash
+helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress --set-string controller.config.use-forwarded-headers=true
+```
+or with `helm upgrade`:
+```bash
+helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true stable/nginx-ingress
+```
+or create a `values.yaml` file with the following content:
+```yaml
+controller:
+  config:
+    use-forwarded-headers: "true"
+```
+Then install nginx-ingress with the values file you created:
+```bash
+helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress -f values.yaml
+```
+
+## Configuration
+The following table lists the configurable parameters of the artifactory chart and their default values.
+ +| Parameter | Description | Default | +|------------------------------|-----------------------------------|-------------------------------------------------------| +| `imagePullSecrets` | Docker registry pull secret | | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Artifactory service account annotations | `` | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `rbac.role.rules` | Rules to create | `[]` | +| `logger.image.repository` | repository for logger image | `busybox` | +| `logger.image.tag` | tag for logger image | `1.30` | +| `artifactory.name` | Artifactory name | `artifactory` | +| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` | +| `artifactory.image.version` | Container image tag | `.Chart.AppVersion` | +| `artifactory.priorityClass.create` | Create a PriorityClass object | `false` | +| `artifactory.priorityClass.value` | Priority Class value | `1000000000` | +| `artifactory.priorityClass.name` | Priority Class name | `{{ template "artifactory-ha.fullname" . }}` | +| `artifactory.priorityClass.existingPriorityClass` | Use existing priority class | `` | +| `artifactory.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `artifactory.loggersResources.requests.memory` | Artifactory loggers initial memory request | | +| `artifactory.loggersResources.requests.cpu` | Artifactory loggers initial cpu request | | +| `artifactory.loggersResources.limits.memory` | Artifactory loggers memory limit | | +| `artifactory.loggersResources.limits.cpu` | Artifactory loggers cpu limit | | +| `artifactory.catalinaLoggers` | Artifactory Tomcat loggers (see values.yaml for possible values) | `[]` | +| `artifactory.catalinaLoggersResources.requests.memory` | Artifactory Tomcat loggers initial memory request | | +| `artifactory.catalinaLoggersResources.requests.cpu` | Artifactory Tomcat loggers initial cpu request | | +| `artifactory.catalinaLoggersResources.limits.memory` | Artifactory Tomcat loggers memory limit | | +| `artifactory.catalinaLoggersResources.limits.cpu` | Artifactory Tomcat loggers cpu limit | | +| `artifactory.customInitContainersBegin`| Custom init containers to run before existing init containers | | +| `artifactory.customInitContainers`| Custom init containers to run after existing init containers | | +| `artifactory.customSidecarContainers`| Custom sidecar containers | | +| `artifactory.customVolumes` | Custom volumes | | +| `artifactory.customVolumeMounts` | Custom Artifactory volumeMounts | | +| `artifactory.customPersistentPodVolumeClaim` | Custom PVC spec to create and attach a unique PVC for each pod on startup with the volumeClaimTemplates feature in StatefulSet | | +| `artifactory.customPersistentVolumeClaim` | Custom PVC spec to be mounted to the all artifactory containers using a volume | | +| `artifactory.customSecrets` | Custom secrets | | +| `artifactory.userPluginSecrets` | Array of secret names for Artifactory user plugins | | +| `artifactory.masterKey` | Artifactory master key. A 128-Bit key size (hexadecimal encoded) string (32 hex characters). Can be generated with `openssl rand -hex 32`. 
NOTE: This key can be generated only once and cannot be updated once created |``| +| `artifactory.masterKeySecretName` | Artifactory Master Key secret name | | +| `artifactory.joinKey` | Join Key to connect other services to Artifactory. Can be generated with `openssl rand -hex 32` | `` | +| `artifactory.joinKeySecretName` | Artifactory join Key secret name | | +| `artifactory.admin.ip` | Artifactory admin ip to be set upon startup, can use (*) for 0.0.0.0| `127.0.0.1` | +| `artifactory.admin.username` | Artifactory admin username to be set upon startup| `admin` | +| `artifactory.admin.password` | Artifactory admin password to be set upon startup| | +| `artifactory.admin.secret` | Artifactory admin secret name | | +| `artifactory.admin.dataKey` | Artifactory admin secret data key | | +| `artifactory.preStartCommand` | Command to run before entrypoint starts | | +| `artifactory.postStartCommand` | Command to run after container starts. Supports templating with `tpl` | | +| `artifactory.license.licenseKey` | Artifactory license key. Providing the license key as a parameter will cause a secret containing the license key to be created as part of the release. Use either this setting or the license.secret and license.dataKey. If you use both, the latter will be used. | | +| `artifactory.configMaps` | configMaps to be created as volume by the name `artifactory-configmaps`. In order to use these configMaps, you will need to add `customVolumeMounts` to point to the created volume and mount it onto a container | | +| `artifactory.license.secret` | Artifactory license secret name | | +| `artifactory.license.dataKey`| Artifactory license secret data key | | +| `artifactory.service.name` | Artifactory service name to be set in Nginx configuration | `artifactory` | +| `artifactory.service.type` | Artifactory service type | `ClusterIP` | +| `artifactory.service.clusterIP`| Specific cluster IP or `None` for headless services | `nil` | +| `artifactory.service.loadBalancerSourceRanges`| Artifactory service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `artifactory.service.annotations` | Artifactory service annotations | `{}` | +| `artifactory.service.pool` | Artifactory instances to be in the load balancing pool. `members` or `all` | `members` | +| `artifactory.externalPort` | Artifactory service external port | `8082` | +| `artifactory.internalPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8082` | +| `artifactory.internalArtifactoryPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8081` | +| `artifactory.externalArtifactoryPort` | Artifactory service external port | `8081` | +| `artifactory.extraEnvironmentVariables` | Extra environment variables to pass to Artifactory. Supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function. 
See [documentation](https://www.jfrog.com/confluence/display/RTF/Installing+with+Docker#InstallingwithDocker-SupportedEnvironmentVariables) | | +| `artifactory.livenessProbe.enabled` | Enable liveness probe | `true` | +| `artifactory.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `artifactory.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `artifactory.readinessProbe.path` | readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `artifactory.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.copyOnEveryStartup` | List of files to copy on startup from source (which is absolute) to target (which is relative to ARTIFACTORY_HOME | | +| `artifactory.deleteDBPropertiesOnStartup` | Whether to delete the ARTIFACTORY_HOME/etc/db.properties file on startup. Disabling this will remove the ability for the db.properties to be updated with any DB-related environment variables change (e.g. 
DB_HOST, DB_URL) | `true` | +| `artifactory.database.maxOpenConnections` | Maximum amount of open connections from Artifactory to the DB | `80` | +| `artifactory.haDataDir.enabled` | Enable haDataDir for eventual storage in the HA cluster | `false` | +| `artifactory.haDataDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.haBackupDir.enabled` | Enable haBackupDir for eventual storage in the HA cluster | `false` | +| `artifactory.haBackupDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.haBackupDir.enabled` | Enable haBackupDir for eventual storage in the HA cluster | `false` | +| `artifactory.haBackupDir.path` | Path to the directory intended for use with NFS eventual configuration for HA | | +| `artifactory.migration.timeoutSeconds` | Artifactory migration Maximum Time out in seconds| `3600` | +| `artifactory.migration.enabled` | Artifactory migration enabled or disabled | `true` | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence or local volume size | `200Gi` | +| `artifactory.persistence.binarystore.enabled` | whether you want to mount the binarystore.xml file from a secret created by the chart. If `false` you will need need to get the binarystore.xml file into the file-system from either an `initContainer` or using a `preStartCommand` | `true` | +| `artifactory.persistence.binarystoreXml` | Artifactory binarystore.xml template | See `values.yaml` | +| `artifactory.persistence.customBinarystoreXmlSecret` | A custom Secret for binarystore.xml | `` | +| `artifactory.persistence.maxCacheSize` | Artifactory cache-fs provider maxCacheSize in bytes | `50000000000` | +| `artifactory.persistence.cacheProviderDir` | the root folder of binaries for the filestore cache. If the value specified starts with a forward slash ("/") it is considered the fully qualified path to the filestore folder. Otherwise, it is considered relative to the *baseDataDir*. 
| `cache` | +| `artifactory.persistence.type` | Artifactory HA storage type | `file-system` | +| `artifactory.persistence.redundancy` | Artifactory HA storage redundancy | `3` | +| `artifactory.persistence.nfs.ip` | NFS server IP | | +| `artifactory.persistence.nfs.haDataMount` | NFS data directory | `/data` | +| `artifactory.persistence.nfs.haBackupMount` | NFS backup directory | `/backup` | +| `artifactory.persistence.nfs.dataDir` | HA data directory | `/var/opt/jfrog/artifactory-ha` | +| `artifactory.persistence.nfs.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.persistence.nfs.capacity` | NFS PVC size | `200Gi` | +| `artifactory.persistence.nfs.mountOptions` | NFS mount options | `[]` | +| `artifactory.persistence.eventual.numberOfThreads` | Eventual number of threads | `10` | +| `artifactory.persistence.googleStorage.endpoint` | Google Storage API endpoint| `storage.googleapis.com` | +| `artifactory.persistence.googleStorage.httpsOnly` | Google Storage API has to be consumed https only| `false` | +| `artifactory.persistence.googleStorage.bucketName` | Google Storage bucket name | `artifactory-ha` | +| `artifactory.persistence.googleStorage.identity` | Google Storage service account id | | +| `artifactory.persistence.googleStorage.credential` | Google Storage service account key | | +| `artifactory.persistence.googleStorage.path` | Google Storage path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.googleStorage.bucketExists`| Google Storage bucket exists therefore does not need to be created.| `false` | +| `artifactory.persistence.awsS3.bucketName` | AWS S3 bucket name | `artifactory-ha` | +| `artifactory.persistence.awsS3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3.roleName` | AWS S3 IAM role name | | +| `artifactory.persistence.awsS3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3.properties` | AWS S3 additional properties | | +| `artifactory.persistence.awsS3.path` | AWS S3 path in bucket | `artifactory-ha/filestore` | +| `artifactory.persistence.awsS3.refreshCredentials` | AWS S3 renew credentials on expiration | `true` (When roleName is used, this parameter will be set to true) | +| `artifactory.persistence.awsS3.httpsOnly` | AWS S3 https access to the bucket only | `true` | +| `artifactory.persistence.awsS3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3.s3AwsVersion` | AWS S3 signature version | `AWS4-HMAC-SHA256` | +| `artifactory.persistence.awsS3V3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3V3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3V3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3V3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3V3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3V3.path` | AWS S3 path in bucket | `artifactory/filestore` | +| `artifactory.persistence.awsS3V3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3V3.maxConnections` | AWS S3 bucket maxConnections | `50` | +| 
`artifactory.persistence.awsS3V3.kmsServerSideEncryptionKeyId` | AWS S3 encryption key ID or alias | | +| `artifactory.persistence.awsS3V3.kmsKeyRegion` | AWS S3 KMS Key region | | +| `artifactory.persistence.awsS3V3.kmsCryptoMode` | AWS S3 KMS encryption mode | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate | +| `artifactory.persistence.awsS3V3.useInstanceCredentials` | AWS S3 Use default authentication mechanism | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-authentication | +| `artifactory.persistence.awsS3V3.usePresigning` | AWS S3 Use URL signing | `false` | +| `artifactory.persistence.awsS3V3.signatureExpirySeconds` | AWS S3 Validity period in seconds for signed URLs | `300` | +| `artifactory.persistence.awsS3V3.cloudFrontDomainName` | AWS CloudFront domain name | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontKeyPairId` | AWS CloudFront key pair ID | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontPrivateKey` | AWS CloudFront private key | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.azureBlob.accountName` | Azure Blob Storage account name | `` | +| `artifactory.persistence.azureBlob.accountKey` | Azure Blob Storage account key | `` | +| `artifactory.persistence.azureBlob.endpoint` | Azure Blob Storage endpoint | `` | +| `artifactory.persistence.azureBlob.containerName` | Azure Blob Storage container name | `` | +| `artifactory.persistence.azureBlob.testConnection` | Azure Blob Storage test connection | `false` | +| `artifactory.persistence.fileSystem.existingSharedClaim` | Enable using an existing shared pvc | `false` | +| `artifactory.persistence.fileStorage.dataDir` | HA data directory | `/var/opt/jfrog/artifactory/artifactory-data` | +| `artifactory.persistence.fileStorage.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.javaOpts.other` | Artifactory additional java options (for all nodes) | | +| `artifactory.replicator.enabled` | Enable the Replicator service (relevant for Enterprise+ only) | `false` | +| `artifactory.ssh.enabled` | Enable Artifactory SSH access | | +| `artifactory.ssh.internalPort` | Artifactory SSH internal port | `1339` | +| `artifactory.ssh.externalPort` | Artifactory SSH external port | `1339` | +| `artifactory.primary.preStartCommand` | Artifactory primary node preStartCommand to be run after `artifactory.preStartCommand` | | +| `artifactory.primary.labels` | Artifactory primary node labels | `{}` | +| `artifactory.primary.resources.requests.memory` | Artifactory primary node initial memory request | | +| `artifactory.primary.resources.requests.cpu` | Artifactory primary node initial cpu request | | +| `artifactory.primary.resources.limits.memory` | Artifactory primary node memory limit | | +| `artifactory.primary.resources.limits.cpu` | Artifactory primary node cpu limit | | +| `artifactory.primary.javaOpts.xms` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.xmx` | Artifactory primary node java Xms size | | +| `artifactory.primary.javaOpts.corePoolSize` | The number of async 
processes that can run in parallel in the primary node - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.primary.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.primary.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.primary.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template "artifactory-ha.primary.name" $ }}` | +| `artifactory.primary.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.primary.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.primary.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.primary.javaOpts.other` | Artifactory primary node additional java options | | +| `artifactory.primary.persistence.existingClaim` | Whether to use an existing pvc for the primary node | `false` | +| `artifactory.node.preStartCommand` | Artifactory member node preStartCommand to be run after `artifactory.preStartCommand` | | +| `artifactory.node.labels` | Artifactory member node labels | `{}` | +| `artifactory.node.replicaCount` | Artifactory member node replica count | `2` | +| `artifactory.node.minAvailable` | Artifactory member node min available count | `1` | +| `artifactory.node.resources.requests.memory` | Artifactory member node initial memory request | | +| `artifactory.node.resources.requests.cpu` | Artifactory member node initial cpu request | | +| `artifactory.node.resources.limits.memory` | Artifactory member node memory limit | | +| `artifactory.node.resources.limits.cpu` | Artifactory member node cpu limit | | +| `artifactory.node.javaOpts.xms` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.xmx` | Artifactory member node java Xms size | | +| `artifactory.node.javaOpts.corePoolSize` | The number of async processes that can run in parallel in the member nodes - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `16` | +| `artifactory.node.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.node.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.node.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template "artifactory-ha.fullname" $ }}` | +| `artifactory.node.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.node.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.node.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.node.javaOpts.other` | Artifactory member node additional java options | | +| `artifactory.node.persistence.existingClaim` | Whether to use existing PVCs for the member nodes | `false` | +| `artifactory.terminationGracePeriodSeconds` | Termination grace period (seconds) | `30s` | +| `artifactory.node.waitForPrimaryStartup.enabled` | Whether to wait for the primary node to start before starting up the member nodes | `false` | +| `artifactory.node.waitForPrimaryStartup.time` | The amount of time to wait for the primary node to start before starting up the member nodes | `60` | +| `artifactory.tomcat.connector.maxThreads` | The max number of connections to Artifactory connector | `200` | +| 
`artifactory.tomcat.connector.extraConfig` | The max queue length for incoming connections to Artifactory connector | `'acceptCount="100"'` | +| `artifactory.systemYaml` | Artifactory system configuration (`system.yaml`) as described here - https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML | `see values.yaml` | +| `artifactory.primary.affinity` | Artifactory primary node affinity | `{}` | +| `artifactory.node.affinity` | Artifactory member node affinity | `{}` | +| `access.database.maxOpenConnections` | Maximum amount of open connections from Access to the DB | `80` | +| `access.tomcat.connector.maxThreads` | The max number of connections to Aceess connector | `50` | +| `access.tomcat.connector.extraConfig` | The max queue length for incoming connections to Access connector | `'acceptCount="100"'` | +| `initContainers.resources.requests.memory` | Init containers initial memory request | | +| `initContainers.resources.requests.cpu` | Init containers initial cpu request | | +| `initContainers.resources.limits.memory` | Init containers memory limit | | +| `initContainers.resources.limits.cpu` | Init containers cpu limit | | +| `ingress.enabled` | If true, Artifactory Ingress will be created | `false` | +| `ingress.annotations` | Artifactory Ingress annotations | `{}` | +| `ingress.labels` | Artifactory Ingress labels | `{}` | +| `ingress.hosts` | Artifactory Ingress hostnames | `[]` | +| `ingress.routerPath` | Router Ingress path | `/` | +| `ingress.artifactoryPath` | Artifactory Ingress path | `/artifactory` | +| `ingress.tls` | Artifactory Ingress TLS configuration (YAML) | `[]` | +| `ingress.defaultBackend.enabled` | If true, the default `backend` will be added using serviceName and servicePort | `true` | +| `ingress.annotations` | Ingress annotations, which are written out if annotations section exists in values. Everything inside of the annotations section will appear verbatim inside the resulting manifest. See `Ingress annotations` section below for examples of how to leverage the annotations, specifically for how to enable docker authentication. | | +| `ingress.additionalRules` | Ingress additional rules to be added to the Artifactory ingress. 
| `[]` | +| `metadata.database.maxOpenConnections` | Maximum amount of open connections from metadata to the DB | `80` | +| `nginx.enabled` | Deploy nginx server | `true` | +| `nginx.kind` | Nginx object kind, for example `DaemonSet`, `Deployment` or `StatefulSet` | `Deployment` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.uid` | Nginx User Id | `104` | +| `nginx.gid` | Nginx Group Id | `107` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.version` | Container version | `.Chart.AppVersion` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.labels` | Nginx deployment labels | `{}` | +| `nginx.minAvailable` | Nginx node min available count | `0` | +| `nginx.loggers` | Nginx loggers (see values.yaml for possible values) | `[]` | +| `nginx.loggersResources.requests.memory` | Nginx logger initial memory request | | +| `nginx.loggersResources.requests.cpu` | Nginx logger initial cpu request | | +| `nginx.loggersResources.limits.memory` | Nginx logger memory limit | | +| `nginx.loggersResources.limits.cpu` | Nginx logger cpu limit | | +| `nginx.logs.stderr` | Send nginx logs to stderr | false | +| `nginx.logs.level` | Nginx log level: debug, info, notice, warn, error, crit, alert, or emerg | warn | +| `nginx.mainConf` | Content of the Artifactory nginx main nginx.conf config file | `see values.yaml` | +| `nginx.artifactoryConf` | Content of Artifactory nginx artifactory.conf config file | `see values.yaml` | +| `nginx.service.type` | Nginx service type | `LoadBalancer` | +| `nginx.service.clusterIP` | Specific cluster IP or `None` for headless services | `nil` | +| `nginx.service.loadBalancerSourceRanges`| Nginx service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `nginx.service.labels` | Nginx service labels | `{}` | +| `nginx.service.annotations` | Nginx service annotations | `{}` | +| `nginx.service.ssloffload` | Nginx service SSL offload | false | +| `nginx.service.externalTrafficPolicy`| Nginx service desires to route external traffic to node-local or cluster-wide endpoints. 
| `Cluster` | +| `nginx.loadBalancerIP`| Provide Static IP to configure with Nginx | | +| `nginx.http.enabled` | Nginx http service enabled/disabled | true | +| `nginx.http.externalPort` | Nginx service external port | `80` | +| `nginx.http.internalPort` | Nginx service internal port | `80` | +| `nginx.https.enabled` | Nginx http service enabled/disabled | true | +| `nginx.https.externalPort` | Nginx service external port | `443` | +| `nginx.https.internalPort` | Nginx service internal port | `443` | +| `nginx.ssh.internalPort` | Nginx SSH internal port | `22` | +| `nginx.ssh.externalPort` | Nginx SSH external port | `22` | +| `nginx.externalPortHttp` | DEPRECATED: Nginx service external port | `80` | +| `nginx.internalPortHttp` | DEPRECATED: Nginx service internal port | `80` | +| `nginx.externalPortHttps` | DEPRECATED: Nginx service external port | `443` | +| `nginx.internalPortHttps` | DEPRECATED: Nginx service internal port | `443` | +| `nginx.livenessProbe.enabled` | would you like a liveness Probe to be enabled | `true` | +| `nginx.livenessProbe.path` | liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 100 | +| `nginx.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `nginx.readinessProbe.path` | Readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `nginx.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `nginx.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `nginx.tlsSecretName` | SSL secret that will be used by the Nginx pod | | +| `nginx.customConfigMap` | Nginx CustomeConfigMap name for `nginx.conf` | ` ` | +| `nginx.customArtifactoryConfigMap`| Nginx CustomeConfigMap name for `artifactory-ha.conf` | ` ` | +| `nginx.resources.requests.memory` | Nginx initial memory request | `250Mi` | +| `nginx.resources.requests.cpu` | Nginx initial cpu request | `100m` | +| `nginx.resources.limits.memory` | Nginx memory limit | `250Mi` | +| `nginx.resources.limits.cpu` | Nginx cpu limit | `500m` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled. 
This is only available when the nginx.replicaCount is set to 1 | `false` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | `ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | +| `waitForDatabase` | Wait for database (using wait-for-db init container) | `true` | +| `postgresql.enabled` | Use enclosed PostgreSQL as database | `true` | +| `postgresql.image.registry` | PostgreSQL image registry | `docker.bintray.io` | +| `postgresql.image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `postgresql.image.tag` | PostgreSQL image tag | `9.6.18-debian-10-r7` | +| `postgresql.postgresqlDatabase` | PostgreSQL database name | `artifactory` | +| `postgresql.postgresqlUsername` | PostgreSQL database user | `artifactory` | +| `postgresql.postgresqlPassword` | PostgreSQL database password | | +| `postgresql.postgresqlExtendedConf.listenAddresses` | PostgreSQL listen address | `"'*'"` | +| `postgresql.postgresqlExtendedConf.maxConnections` | PostgreSQL max_connections parameter | `1500` | +| `postgresql.persistence.enabled` | PostgreSQL use persistent storage | `true` | +| `postgresql.persistence.size` | PostgreSQL persistent storage size | `50Gi` | +| `postgresql.service.port` | PostgreSQL database port | `5432` | +| `postgresql.resources.requests.memory` | PostgreSQL initial memory request | | +| `postgresql.resources.requests.cpu` | PostgreSQL initial cpu request | | +| `postgresql.resources.limits.memory` | PostgreSQL memory limit | | +| `postgresql.resources.limits.cpu` | PostgreSQL cpu limit | | +| `postgresql.master.nodeSelector` | PostgreSQL master node selector | `{}` | +| `postgresql.master.affinity` | PostgreSQL master node affinity | `{}` | +| `postgresql.master.tolerations` | PostgreSQL master node tolerations | `[]` | +| `postgresql.slave.nodeSelector` | PostgreSQL slave node selector | `{}` | +| `postgresql.slave.affinity` | PostgreSQL slave node affinity | `{}` | +| `postgresql.slave.tolerations` | PostgreSQL slave node tolerations | `[]` | +| `database.type` | External database type (`postgresql`, `mysql`, `oracle` or `mssql`) | | +| `database.driver` | External database driver e.g. `org.postgresql.Driver` | | +| `database.url` | External database connection URL | | +| `database.user` | External database username | | +| `database.password` | External database password | | +| `database.secrets.user.name` | External database username `Secret` name | | +| `database.secrets.user.key` | External database username `Secret` key | | +| `database.secrets.password.name` | External database password `Secret` name | | +| `database.secrets.password.key` | External database password `Secret` key | | +| `database.secrets.url.name ` | External database url `Secret` name | | +| `database.secrets.url.key` | External database url `Secret` key | | +| `networkpolicy.name` | Becomes part of the NetworkPolicy object name | `artifactory` | +| `networkpolicy.podselector` | Contains the YAML that specifies how to match pods. Usually using matchLabels. 
| | +| `networkpolicy.ingress` | YAML snippet containing to & from rules applied to incoming traffic | `- {}` (open to all inbound traffic) | +| `networkpolicy.egress` | YAML snippet containing to & from rules applied to outgoing traffic | `- {}` (open to all outbound traffic) | +| `filebeat.enabled` | Enable a filebeat container to send your logs to a log management solution like ELK | `false` | +| `filebeat.name` | filebeat container name | `artifactory-filebeat` | +| `filebeat.image.repository` | filebeat Docker image repository | `docker.elastic.co/beats/filebeat` | +| `filebeat.image.version` | filebeat Docker image version | `7.5.1` | +| `filebeat.logstashUrl` | The URL to the central Logstash service, if you have one | `logstash:5044` | +| `filebeat.livenessProbe.exec.command` | liveness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `filebeat.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.readinessProbe.exec.command` | readiness probe exec command | see [values.yaml](stable/artifactory-ha/values.yaml) | +| `filebeat.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `filebeat.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.resources.requests.memory` | Filebeat initial memory request | | +| `filebeat.resources.requests.cpu` | Filebeat initial cpu request | | +| `filebeat.resources.limits.memory` | Filebeat memory limit | | +| `filebeat.resources.limits.cpu` | Filebeat cpu limit | | +| `filebeat.filebeatYml` | Filebeat yaml configuration file | see [values.yaml](stable/artifactory-ha/values.yaml) | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +## Useful links +- https://www.jfrog.com/confluence/display/EP/Getting+Started +- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory +- https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/ReverseProxyConfiguration.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/ReverseProxyConfiguration.md new file mode 100644 index 000000000..851593236 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/ReverseProxyConfiguration.md @@ -0,0 +1,140 @@ +# JFrog Artifactory Reverse Proxy Settings using Nginx + +#### Reverse Proxy +* To use Artifactory as docker registry it's mandatory to use Reverse Proxy. +* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate +the required configuration snippet which you can then download and install directly in the corresponding directory of your reverse proxy server. +* To learn about configuring NGINX or Apache for reverse proxy refer to documentation provided on [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy) +* By default Artifactory helm chart uses Nginx for reverse proxy and load balancing. + +**Note**: Nginx image distributed with Artifactory helm chart is custom image managed and maintained by JFrog. 
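+
+If you plan to terminate TLS and route traffic with your own reverse proxy or ingress controller instead of the bundled Nginx, the bundled Nginx can be switched off. A minimal sketch using the chart's `nginx.enabled` and `ingress.enabled` values (the release name, namespace and hostname below are placeholders, not part of the chart):
+```bash
+# Disable the bundled Nginx and expose Artifactory through an existing ingress controller instead
+helm upgrade --install artifactory-ha --namespace artifactory-ha \
+  --set nginx.enabled=false \
+  --set ingress.enabled=true \
+  --set "ingress.hosts[0]=artifactory.company.com" \
+  center/jfrog/artifactory-ha
+```
+The rest of this document assumes the bundled Nginx reverse proxy is in use (the chart default).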
+
+#### Features of Artifactory Nginx
+* Provides a default configuration with a self-signed SSL certificate generated on each helm install/upgrade.
+* Persists configuration and the SSL certificate in the `/var/opt/jfrog/nginx` directory.
+
+#### Changing the default Artifactory nginx conf
+Use a values.yaml file to change the value of `nginx.mainConf` or `nginx.artifactoryConf`.
+These configurations will be mounted into the nginx container using a configMap.
+For example:
+1. Create a values file `nginx-values.yaml` with the following values:
+```yaml
+nginx:
+  artifactoryConf: |
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
+    ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
+    ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
+    ssl_session_cache shared:SSL:1m;
+    ssl_prefer_server_ciphers on;
+    ## server configuration
+    server {
+      listen {{ .Values.nginx.internalPortHttps }} ssl;
+      listen {{ .Values.nginx.internalPortHttp }};
+      ## Change to the DNS name you use to access Artifactory
+      server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }};
+
+      if ($http_x_forwarded_proto = '') {
+        set $http_x_forwarded_proto $scheme;
+      }
+      ## Application specific logs
+      ## access_log /var/log/nginx/artifactory-access.log timing;
+      ## error_log /var/log/nginx/artifactory-error.log;
+      rewrite ^/$ /artifactory/webapp/ redirect;
+      rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+      if ( $repo != "" ) {
+        rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+      }
+      rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+      rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+      chunked_transfer_encoding on;
+      client_max_body_size 0;
+      location /artifactory/ {
+        proxy_read_timeout 900;
+        proxy_pass_header Server;
+        proxy_cookie_path ~*^/.* /;
+        if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+          proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1;
+        }
+        proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/;
+        proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+        proxy_set_header X-Forwarded-Port $server_port;
+        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+        proxy_set_header Host $http_host;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      }
+    }
+```
+
+2. Install/upgrade artifactory:
+```bash
+helm upgrade --install artifactory-ha jfrog/artifactory-ha -f nginx-values.yaml
+```
+
+
+#### Steps to use a static configuration for the reverse proxy in nginx
+1. Get the Artifactory service name using this command: `kubectl get svc -n $NAMESPACE`
+
+2. Create an `artifactory.conf` file with the nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d) are available.
+
+   Following is an example `artifactory.conf`:
+
+   **Note**:
+   * Create the file with the name `artifactory.conf`, as this key is fixed in the configMap.
+   * Replace `artifactory-artifactory` with the service name taken from step 1.
+
+   ```bash
+   ## add ssl entries when https has been set in config
+   ssl_certificate      /var/opt/jfrog/nginx/ssl/tls.crt;
+   ssl_certificate_key  /var/opt/jfrog/nginx/ssl/tls.key;
+   ssl_session_cache shared:SSL:1m;
+   ssl_prefer_server_ciphers   on;
+   ## server configuration
+   server {
+     listen 443 ssl;
+     listen 80;
+     ## Change to the DNS name you use to access Artifactory
+     server_name ~(?<repo>.+)\.artifactory-artifactory artifactory-artifactory;
+
+     if ($http_x_forwarded_proto = '') {
+       set $http_x_forwarded_proto $scheme;
+     }
+     ## Application specific logs
+     ## access_log /var/log/nginx/artifactory-access.log timing;
+     ## error_log /var/log/nginx/artifactory-error.log;
+     rewrite ^/$ /artifactory/webapp/ redirect;
+     rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect;
+     if ( $repo != "" ) {
+       rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
+     }
+     rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3;
+     rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/;
+     chunked_transfer_encoding on;
+     client_max_body_size 0;
+     location /artifactory/ {
+       proxy_read_timeout  900;
+       proxy_pass_header   Server;
+       proxy_cookie_path   ~*^/.* /;
+       if ( $request_uri ~ ^/artifactory/(.*)$ ) {
+         proxy_pass http://artifactory-artifactory:8081/artifactory/$1;
+       }
+       proxy_pass http://artifactory-artifactory:8081/artifactory/;
+       proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory;
+       proxy_set_header X-Forwarded-Port  $server_port;
+       proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
+       proxy_set_header Host              $http_host;
+       proxy_set_header X-Forwarded-For   $proxy_add_x_forwarded_for;
+     }
+   }
+   ```
+
+3. Create a configMap from the `artifactory.conf` created in the step above:
+   ```bash
+   kubectl create configmap art-nginx-conf --from-file=artifactory.conf
+   ```
+4. Deploy Artifactory using the helm chart.
+   You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml).
+
+   Following is the command to set the values at runtime:
+   ```bash
+   helm install --name artifactory-ha --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory-ha
+   ```
\ No newline at end of file
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/UPGRADE_NOTES.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/UPGRADE_NOTES.md
new file mode 100644
index 000000000..9c7f771be
--- /dev/null
+++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/UPGRADE_NOTES.md
@@ -0,0 +1,38 @@
+# JFrog Artifactory Chart Upgrade Notes
+This file describes special upgrade notes needed at specific versions.
+
+## Upgrade from 1.X to 2.X (Chart Versions)
+
+* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!**
+* To upgrade from a version prior to 1.x, you first need to upgrade to the latest version of 1.x as described in https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md.
+
+## Upgrade from 0.X to 1.X (Chart Versions)
+**DOWNTIME IS REQUIRED FOR AN UPGRADE!**
+* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!**
+* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)!
+* Note the following **PostgreSQL** Helm chart changes
+  * The chart configuration has changed!
See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shutdown) + a. Scale down the cluster to primary node only (`node.replicaCount=0`) so the exported db and configuration will be kept on one known node (the primary) + b. If your Artifactory HA K8s service is set to member nodes only (`service.pool=members`) you will need to access the primary node directly (use `kubectl port-forward`) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as Artifactory filestore will persist across upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. Old PostgreSQL will be removed and new one deployed + a. You must pass explicit "ready for upgrade flag" with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4. Once ready, open Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry you can't see the old config and files. It will all restore with the system import in the next step + 5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6. Restore access to Artifactory + a. Scale the cluster member nodes back to the original size + * Artifactory should now be ready to get back to normal operation diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/app-readme.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/app-readme.md new file mode 100644 index 000000000..a5aa5fd47 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/app-readme.md @@ -0,0 +1,16 @@ +# JFrog Artifactory High Availability Helm Chart + +Universal Repository Manager supporting all major packaging formats, build tools and CI servers. + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server(optional) + +## Useful links +Blog: [Herd Trust Into Your Rancher Labs Multi-Cloud Strategy with Artifactory](https://jfrog.com/blog/herd-trust-into-your-rancher-labs-multi-cloud-strategy-with-artifactory/) + +## Activate Your Artifactory Instance +Don't have a license? Please send an email to [rancher-jfrog-licenses@jfrog.com](mailto:rancher-jfrog-licenses@jfrog.com) to get it. 
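+
+Once you have a license, one way to pass it to the chart is through a Kubernetes secret referenced by the `artifactory.license.secret` and `artifactory.license.dataKey` values documented in the chart README. A sketch, assuming the license was saved locally as `artifactory.cluster.license` (a placeholder file name):
+```bash
+# Create a secret holding the license file, then point the chart at it
+kubectl create secret generic artifactory-license --from-file=artifactory.cluster.license --namespace artifactory-ha
+helm upgrade --install artifactory-ha --namespace artifactory-ha \
+  --set artifactory.license.secret=artifactory-license \
+  --set artifactory.license.dataKey=artifactory.cluster.license \
+  center/jfrog/artifactory-ha
+```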
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/.helmignore b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/Chart.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/Chart.yaml new file mode 100644 index 000000000..a61a09ff7 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +appVersion: 11.7.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 8.7.3 diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/README.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/README.md new file mode 100644 index 000000000..c2b848af1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/README.md @@ -0,0 +1,576 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
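+
+Individual parameters can also be overridden on the command line at install time. For example, a sketch that sets the database name and credentials (the values shown are placeholders; see the parameter table below):
+```console
+$ helm install my-release bitnami/postgresql \
+    --set postgresqlUsername=my-user,postgresqlPassword=my-password,postgresqlDatabase=my-database
+```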
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. 
| `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. 
| `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod 
assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 |
+| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+    bitnami/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally, it creates a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration and horizontal scaling
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Enable replication:
+```diff
+- replication.enabled: false
++ replication.enabled: true
+```
+
+- Number of slave replicas:
+```diff
+- replication.slaveReplicas: 1
++ replication.slaveReplicas: 2
+```
+
+- Set synchronous commit mode:
+```diff
+- replication.synchronousCommit: "off"
++ replication.synchronousCommit: "on"
+```
+
+- Number of replicas that will have synchronous replication:
+```diff
+- replication.numSynchronousReplicas: 0
++ replication.numSynchronousReplicas: 1
+```
+
+- Start a prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also, you can use the `values-production.yaml` file or modify the parameters shown above.
+
+### Customizing Master and Slave services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for the master and slave types individually. This allows you to override the values in the top-level service object so that the master and slave can use different service types and different clusterIPs / nodePorts. Also, if you want the master and slave to be of type NodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects take precedence over the top-level service object.
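+
+For example, a values sketch (the port numbers are illustrative) that exposes the master and slave through separate NodePort services could look like:
+
+```yaml
+# Sketch only: per-role overrides take precedence over the top-level service object
+master:
+  service:
+    type: NodePort
+    nodePort: 31001
+slave:
+  service:
+    type: NodePort
+    nodePort: 31002
+```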
+
+### Change PostgreSQL version
+
+To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0`.
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This Helm chart also supports customizing the whole configuration file.
+
+Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a configMap to the containers and it will be used for configuring the PostgreSQL server.
+
+Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. `{"sharedBuffers": "500MB"}`.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
+
+### Allow settings to be loaded from files other than the default `postgresql.conf`
+
+If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
+Those files will be mounted as a configMap to the containers, adding to or overriding the default configuration through the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
+
+Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
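+
+As an illustration, a hypothetical `initdbScripts` entry could look like the following sketch (the script name and SQL are placeholders):
+
+```yaml
+# Sketch only: each key becomes a script executed on the first boot of a fresh instance
+initdbScripts:
+  create_app_schema.sql: |
+    CREATE SCHEMA IF NOT EXISTS myapp;
+```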
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL master
+master:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+# For the PostgreSQL replicas
+slave:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed; it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
+
+The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies:
+
+```
+                  +--------------+
+                  |              |
+     +------------+   Chart 1    +-----------+
+     |            |              |           |
+     |            +------+-------+           |
+     |                   |                   |
+     |                   |                   |
+     v                   v                   v
++----+---------+  +------+-------+  +--------+-----+
+|              |  |              |  |              |
+|  PostgreSQL  |  |  Sub-chart 1 |  |  Sub-chart 2 |
+|              |  |              |  |              |
++--------------+  +--------------+  +--------------+
+```
+
+The three charts below are declared as dependencies of the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one deployment option could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.postgresqlPassword=testtest
+subchart1.postgresql.postgresqlPassword=testtest
+subchart2.postgresql.postgresqlPassword=testtest
+postgresql.postgresqlDatabase=db1
+subchart1.postgresql.postgresqlDatabase=db1
+subchart2.postgresql.postgresqlDatabase=db1
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.postgresqlPassword=testtest
+global.postgresql.postgresqlDatabase=db1
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, syncing to the standby nodes will fail for all commits; see the [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need to keep that data, convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
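+
+As a minimal values sketch, enabling the policy and restricting external access looks like:
+
+```yaml
+# Sketch only: requires a networking plugin that implements NetworkPolicy
+networkPolicy:
+  enabled: true
+  allowExternal: false
+```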
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with a `securityContext` and update the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift, one may either define the `runAsUser` and `fsGroup` accordingly, or try this more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false`
+
+### Deploy chart using Docker Official PostgreSQL Image
+
+From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
+Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly; it has to be a subdirectory.
+
+```
+image.repository=postgres
+image.tag=10.6
+postgresqlDataDir=/data/pgdata
+persistence.mountPath=/data/
+```
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+    --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+    --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_ and _[REPLICATION_PASSWORD]_ with the values obtained from the instructions in the installation notes.
+
+## 8.0.0
+
+Prefixes the port names with their protocols to comply with Istio conventions.
+
+If you depend on the port names in your setup, make sure to update them to reflect this change.
+
+## 7.1.0
+
+Adds support for LDAP configuration.
+
+## 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the API deprecations, resulting in a compatibility break.
+
+This major version bump signifies this change.
+
+## 6.5.7
+
+In this version, the chart will use PostgreSQL with the PostGIS extension included. The version used with PostgreSQL versions 10, 11 and 12 is PostGIS 2.5. It has been compiled with the following dependencies:
+
+- protobuf
+- protobuf-c
+- json-c
+- geos
+- proj
+
+## 5.0.0
+
+In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**.
+You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
+
+For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. In that case, you may see errors like the following in the logs:
+
+```console
+Welcome to the Bitnami postgresql container
+Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
+Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
+Send us your feedback at containers@bitnami.com
+
+INFO ==> ** Starting PostgreSQL setup **
+NFO ==> Validating settings in POSTGRESQL_* env vars..
+INFO ==> Initializing PostgreSQL database...
+INFO ==> postgresql.conf file not detected. Generating it...
+INFO ==> pg_hba.conf file not detected. Generating it...
+INFO ==> Deploying PostgreSQL with persisted data...
+INFO ==> Configuring replication parameters
+INFO ==> Loading custom scripts...
+INFO ==> Enabling remote connections
+INFO ==> Stopping PostgreSQL...
+INFO ==> ** PostgreSQL setup finished! **
+
+INFO ==> ** Starting PostgreSQL **
+  [1] FATAL:  database files are incompatible with server
+  [1] DETAIL:  The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
+```
+
+In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one.
+
+### 4.0.0
+
+By default, this chart will use the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users of previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error:
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+### 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+### 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
+
+ - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart.
+You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running:
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install my-release bitnami/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`. For that, connect to the previous PostgreSQL chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the password of the previous chart (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local PostgreSQL, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to follow the steps below first.
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/default-values.yaml
new file mode 100644
index 000000000..fc2ba605a
--- /dev/null
+++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/default-values.yaml
@@ -0,0 +1 @@
+# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/shmvolume-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/shmvolume-disabled-values.yaml
new file mode 100644
index 000000000..347d3b40a
--- /dev/null
+++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/ci/shmvolume-disabled-values.yaml
@@ -0,0 +1,2 @@
+shmVolume:
+  enabled: false
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/README.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/README.md
new file mode 100644
index 000000000..1813a2fea
--- /dev/null
+++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/README.md
@@ -0,0 +1 @@
+Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map.
diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/conf.d/README.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/conf.d/README.md
new file mode 100644
index 000000000..184c1875d
--- /dev/null
+++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/conf.d/README.md
@@ -0,0 +1,4 @@
+If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files.
+These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 000000000..cba38091e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/NOTES.txt b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/NOTES.txt new file mode 100644 index 000000000..3b5e6c60d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,60 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
+{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 000000000..708434856 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,420 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/configmap.yaml new file mode 100644 index 000000000..d2178c077 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/extended-config-configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 000000000..8a4119578 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/initialization-configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 000000000..8eb5e0588 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 000000000..524aa2f6a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-svc.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 000000000..c610f09af --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 000000000..ea1fc9b3a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/prometheusrule.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 000000000..44f1242dd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/secrets.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/secrets.yaml new file mode 100644 index 000000000..094d18b49 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . 
| b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 000000000..27e5b516e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/servicemonitor.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 000000000..f3a529a96 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset-slaves.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 000000000..b6d607672 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,299 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{ if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . 
| quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 000000000..66eaa01d1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,453 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | indent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- tpl .Values.master.extraInitContainers . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . 
}}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-headless.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 000000000..5c71f468d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-read.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 000000000..92bdda80e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if $serviceAnnotations }} + annotations: {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc.yaml new file mode 100644 index 000000000..299e8d0b7 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if $serviceAnnotations }} + annotations: {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values-production.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values-production.yaml new file mode 100644 index 000000000..d34e326ee --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values-production.yaml @@ -0,0 +1,542 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r65 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. 
+ ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
+  ##
+  allowExternal: true
+
+  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+  ## and that carry the correct client label can reach the DB.
+  ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
+  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
+  ##
+  ## Example:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##    - {key: role, operator: In, values: [frontend]}
+  explicitNamespacesSelector: {}
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Configure metrics exporter
+##
+metrics:
+  enabled: true
+  # resources: {}
+  service:
+    type: ClusterIP
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9187"
+    loadBalancerIP:
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+    # namespace: monitoring
+    # interval: 30s
+    # scrapeTimeout: 10s
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    ## These are just example rules; please adapt them to your needs.
+    ## Make sure to constrain the rules to the current postgresql service.
+    ## rules:
+    ##   - alert: HugeReplicationLag
+    ##     expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
+    ##     for: 1m
+    ##     labels:
+    ##       severity: critical
+    ##     annotations:
+    ##       description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+    rules: []
+
+  image:
+    registry: docker.io
+    repository: bitnami/postgres-exporter
+    tag: 0.8.0-debian-10-r72
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.schema.json b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.schema.json new file mode 100644 index 000000000..ac2de6e94 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": 
true + } + } + } + } +} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.yaml new file mode 100644 index 000000000..e14709a5e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/charts/postgresql/values.yaml @@ -0,0 +1,548 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r65 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
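The "auto" ownership mode described above is normally paired with the two other settings the comment mentions; a sketch of the corresponding install-time overrides, assuming the PostgreSQL chart is installed directly (release name and chart path are placeholders, and these keys would gain a postgresql. prefix when set through the parent artifactory-ha chart):

    helm install my-postgres ./postgresql \
      --set volumePermissions.enabled=true \
      --set volumePermissions.securityContext.runAsUser=auto \
      --set securityContext.enabled=false \
      --set shmVolume.chmod.enabled=false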
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # 
command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r72 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/access-tls-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/access-tls-values.yaml new file mode 100644 index 000000000..2ba5a7af1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/access-tls-values.yaml @@ -0,0 +1,9 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + +access: + accessConfig: + security: + tls: true + resetAccessCAKeys: true diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/default-values.yaml new file mode 100644 index 000000000..01310a3b7 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/default-values.yaml @@ -0,0 +1,6 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
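These ci/*.yaml files are small value overrides used to exercise the chart in CI rather than end-user configuration; a hedged example of applying one of them by hand from the chart root (release name is a placeholder):

    helm upgrade --install artifactory-ha-ci . -f ci/access-tls-values.yaml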
+databaseUpgradeReady: true +## This is an exception here because HA needs masterKey to connect with other node members and it is commented in values to support 6.x to 7.x Migration +## Please refer https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#special-upgrade-notes-1 +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/migration-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/migration-disabled-values.yaml new file mode 100644 index 000000000..148618353 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/migration-disabled-values.yaml @@ -0,0 +1,5 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + migration: + enabled: false diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/test-values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/test-values.yaml new file mode 100644 index 000000000..0ee7cbdd2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/ci/test-values.yaml @@ -0,0 +1,14 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + persistence: + enabled: true + +postgresql: + image: + tag: 9.6.18-debian-10-r7 + postgresqlPassword: "password" + postgresqlExtendedConf: + maxConnections: "102" + persistence: + enabled: true diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/files/migrate.sh b/charts/artifactory-ha/artifactory-ha/3.0.1400/files/migrate.sh new file mode 100644 index 000000000..e35bfdbb2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/files/migrate.sh @@ -0,0 +1,4339 @@ +#!/bin/bash + +# Flags +FLAG_Y="y" +FLAG_N="n" +FLAGS_Y_N="$FLAG_Y $FLAG_N" +FLAG_NOT_APPLICABLE="_NA_" + +CURRENT_VERSION=$1 + +WRAPPER_SCRIPT_TYPE_RPMDEB="RPMDEB" +WRAPPER_SCRIPT_TYPE_DOCKER_COMPOSE="DOCKERCOMPOSE" + +SENSITIVE_KEY_VALUE="__sensitive_key_hidden___" + +# Shared system keys +SYS_KEY_SHARED_JFROGURL="shared.jfrogUrl" +SYS_KEY_SHARED_SECURITY_JOINKEY="shared.security.joinKey" +SYS_KEY_SHARED_SECURITY_MASTERKEY="shared.security.masterKey" + +SYS_KEY_SHARED_NODE_ID="shared.node.id" +SYS_KEY_SHARED_JAVAHOME="shared.javaHome" + +SYS_KEY_SHARED_DATABASE_TYPE="shared.database.type" +SYS_KEY_SHARED_DATABASE_TYPE_VALUE_POSTGRES="postgresql" +SYS_KEY_SHARED_DATABASE_DRIVER="shared.database.driver" +SYS_KEY_SHARED_DATABASE_URL="shared.database.url" +SYS_KEY_SHARED_DATABASE_USERNAME="shared.database.username" +SYS_KEY_SHARED_DATABASE_PASSWORD="shared.database.password" + +SYS_KEY_SHARED_ELASTICSEARCH_URL="shared.elasticsearch.url" +SYS_KEY_SHARED_ELASTICSEARCH_USERNAME="shared.elasticsearch.username" +SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD="shared.elasticsearch.password" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP="shared.elasticsearch.clusterSetup" +SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE="shared.elasticsearch.unicastFile" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP_VALUE="YES" + +# Define this in product specific script. Should contain the path to unitcast file +# File used by insight server to write cluster active nodes info. 
This will be read by elasticsearch +#SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE_VALUE="" + +SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME="shared.rabbitMq.active.node.name" +SYS_KEY_RABBITMQ_ACTIVE_NODE_IP="shared.rabbitMq.active.node.ip" + +# Filenames +FILE_NAME_SYSTEM_YAML="system.yaml" +FILE_NAME_JOIN_KEY="join.key" +FILE_NAME_MASTER_KEY="master.key" +FILE_NAME_INSTALLER_YAML="installer.yaml" + +# Global constants used in business logic +NODE_TYPE_STANDALONE="standalone" +NODE_TYPE_CLUSTER_NODE="node" +NODE_TYPE_DATABASE="database" + +# External(isable) databases +DATABASE_POSTGRES="POSTGRES" +DATABASE_ELASTICSEARCH="ELASTICSEARCH" +DATABASE_RABBITMQ="RABBITMQ" + +POSTGRES_LABEL="PostgreSQL" +ELASTICSEARCH_LABEL="Elasticsearch" +RABBITMQ_LABEL="Rabbitmq" + +ARTIFACTORY_LABEL="Artifactory" +JFMC_LABEL="Mission Control" +DISTRIBUTION_LABEL="Distribution" +XRAY_LABEL="Xray" + +POSTGRES_CONTAINER="postgres" +ELASTICSEARCH_CONTAINER="elasticsearch" +RABBITMQ_CONTAINER="rabbitmq" +REDIS_CONTAINER="redis" + +#Adding a small timeout before a read ensures it is positioned correctly in the screen +read_timeout=0.5 + +# Options related to data directory location +PROMPT_DATA_DIR_LOCATION="Installation Directory" +KEY_DATA_DIR_LOCATION="installer.data_dir" + +SYS_KEY_SHARED_NODE_HAENABLED="shared.node.haEnabled" +PROMPT_ADD_TO_CLUSTER="Are you adding an additional node to an existing product cluster?" +KEY_ADD_TO_CLUSTER="installer.ha" +VALID_VALUES_ADD_TO_CLUSTER="$FLAGS_Y_N" + +MESSAGE_POSTGRES_INSTALL="The installer can install a $POSTGRES_LABEL database, or you can connect to an existing compatible $POSTGRES_LABEL database\n(compatible databases: https://www.jfrog.com/confluence/display/JFROG/System+Requirements#SystemRequirements-RequirementsMatrix)" +PROMPT_POSTGRES_INSTALL="Do you want to install $POSTGRES_LABEL?" +KEY_POSTGRES_INSTALL="installer.install_postgresql" +VALID_VALUES_POSTGRES_INSTALL="$FLAGS_Y_N" + +# Postgres connection details +RPM_DEB_POSTGRES_HOME_DEFAULT="/var/opt/jfrog/postgres" +RPM_DEB_MESSAGE_STANDALONE_POSTGRES_DATA="$POSTGRES_LABEL home will have data and its configuration" +RPM_DEB_PROMPT_STANDALONE_POSTGRES_DATA="Type desired $POSTGRES_LABEL home location" +RPM_DEB_KEY_STANDALONE_POSTGRES_DATA="installer.postgresql.home" + +MESSAGE_DATABASE_URL="Provide the database connection details" +PROMPT_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://:/artifactory" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://:/mission_control?sslmode=disable" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://:/distribution?sslmode=disable" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://:/xraydb?sslmode=disable" + ;; + esac + if [ -z "$databaseURlExample" ]; then + echo -n "$POSTGRES_LABEL URL" # For consistency with username and password + return + fi + echo -n "$POSTGRES_LABEL url. 
Example: [$databaseURlExample]" +} +REGEX_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://.*/artifactory.*" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://.*/mission_control.*" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://.*/distribution.*" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://.*/xraydb.*" + ;; + esac + echo -n "^$databaseURlExample\$" +} +ERROR_MESSAGE_DATABASE_URL="Invalid $POSTGRES_LABEL URL" +KEY_DATABASE_URL="$SYS_KEY_SHARED_DATABASE_URL" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_USERNAME="$POSTGRES_LABEL username" +KEY_DATABASE_USERNAME="$SYS_KEY_SHARED_DATABASE_USERNAME" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_PASSWORD="$POSTGRES_LABEL password" +KEY_DATABASE_PASSWORD="$SYS_KEY_SHARED_DATABASE_PASSWORD" +IS_SENSITIVE_DATABASE_PASSWORD="$FLAG_Y" + +MESSAGE_STANDALONE_ELASTICSEARCH_INSTALL="The installer can install a $ELASTICSEARCH_LABEL database or you can connect to an existing compatible $ELASTICSEARCH_LABEL database" +PROMPT_STANDALONE_ELASTICSEARCH_INSTALL="Do you want to install $ELASTICSEARCH_LABEL?" +KEY_STANDALONE_ELASTICSEARCH_INSTALL="installer.install_elasticsearch" +VALID_VALUES_STANDALONE_ELASTICSEARCH_INSTALL="$FLAGS_Y_N" + +# Elasticsearch connection details +MESSAGE_ELASTICSEARCH_DETAILS="Provide the $ELASTICSEARCH_LABEL connection details" +PROMPT_ELASTICSEARCH_URL="$ELASTICSEARCH_LABEL URL" +KEY_ELASTICSEARCH_URL="$SYS_KEY_SHARED_ELASTICSEARCH_URL" + +PROMPT_ELASTICSEARCH_USERNAME="$ELASTICSEARCH_LABEL username" +KEY_ELASTICSEARCH_USERNAME="$SYS_KEY_SHARED_ELASTICSEARCH_USERNAME" + +PROMPT_ELASTICSEARCH_PASSWORD="$ELASTICSEARCH_LABEL password" +KEY_ELASTICSEARCH_PASSWORD="$SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD" +IS_SENSITIVE_ELASTICSEARCH_PASSWORD="$FLAG_Y" + +# Cluster related questions +MESSAGE_CLUSTER_MASTER_KEY="Provide the cluster's master key. It can be found in the data directory of the first node under /etc/security/master.key" +PROMPT_CLUSTER_MASTER_KEY="Master Key" +KEY_CLUSTER_MASTER_KEY="$SYS_KEY_SHARED_SECURITY_MASTERKEY" +IS_SENSITIVE_CLUSTER_MASTER_KEY="$FLAG_Y" + +MESSAGE_JOIN_KEY="The Join key is the secret key used to establish trust between services in the JFrog Platform.\n(You can copy the Join Key from Admin > Security > Settings)" +PROMPT_JOIN_KEY="Join Key" +KEY_JOIN_KEY="$SYS_KEY_SHARED_SECURITY_JOINKEY" +IS_SENSITIVE_JOIN_KEY="$FLAG_Y" +REGEX_JOIN_KEY="^[a-zA-Z0-9]{16,}\$" +ERROR_MESSAGE_JOIN_KEY="Invalid Join Key" + +# Rabbitmq related cluster information +MESSAGE_RABBITMQ_ACTIVE_NODE_NAME="Provide an active ${RABBITMQ_LABEL} node name. 
Run the command [ hostname -s ] on any of the existing nodes in the product cluster to get this" +PROMPT_RABBITMQ_ACTIVE_NODE_NAME="${RABBITMQ_LABEL} active node name" +KEY_RABBITMQ_ACTIVE_NODE_NAME="$SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME" + +# Rabbitmq related cluster information (necessary only for docker-compose) +PROMPT_RABBITMQ_ACTIVE_NODE_IP="${RABBITMQ_LABEL} active node ip" +KEY_RABBITMQ_ACTIVE_NODE_IP="$SYS_KEY_RABBITMQ_ACTIVE_NODE_IP" + +MESSAGE_JFROGURL(){ + echo -e "The JFrog URL allows ${PRODUCT_NAME} to connect to a JFrog Platform Instance.\n(You can copy the JFrog URL from Admin > Security > Settings)" +} +PROMPT_JFROGURL="JFrog URL" +KEY_JFROGURL="$SYS_KEY_SHARED_JFROGURL" +REGEX_JFROGURL="^https?://.*:{0,}[0-9]{0,4}\$" +ERROR_MESSAGE_JFROGURL="Invalid JFrog URL" + + +# Set this to FLAG_Y on upgrade +IS_UPGRADE="${FLAG_N}" + +# This belongs in JFMC but is the ONLY one that needs it so keeping it here for now. Can be made into a method and overridden if necessary +MESSAGE_MULTIPLE_PG_SCHEME="Please setup $POSTGRES_LABEL with schema as described in https://www.jfrog.com/confluence/display/JFROG/Installing+Mission+Control" + +_getMethodOutputOrVariableValue() { + unset EFFECTIVE_MESSAGE + local keyToSearch=$1 + local effectiveMessage= + local result="0" + # logSilly "Searching for method: [$keyToSearch]" + LC_ALL=C type "$keyToSearch" > /dev/null 2>&1 || result="$?" + if [[ "$result" == "0" ]]; then + # logSilly "Found method for [$keyToSearch]" + EFFECTIVE_MESSAGE="$($keyToSearch)" + return + fi + eval EFFECTIVE_MESSAGE=\${$keyToSearch} + if [ ! -z "$EFFECTIVE_MESSAGE" ]; then + return + fi + # logSilly "Didn't find method or variable for [$keyToSearch]" +} + + +# REF https://misc.flogisoft.com/bash/tip_colors_and_formatting +cClear="\e[0m" +cBlue="\e[38;5;69m" +cRedDull="\e[1;31m" +cYellow="\e[1;33m" +cRedBright="\e[38;5;197m" +cBold="\e[1m" + + +_loggerGetModeRaw() { + local MODE="$1" + case $MODE in + INFO) + printf "" + ;; + DEBUG) + printf "%s" "[${MODE}] " + ;; + WARN) + printf "${cRedDull}%s%s${cClear}" "[" "${MODE}" "] " + ;; + ERROR) + printf "${cRedBright}%s%s${cClear}" "[" "${MODE}" "] " + ;; + esac +} + + +_loggerGetMode() { + local MODE="$1" + case $MODE in + INFO) + printf "${cBlue}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + DEBUG) + printf "%-7s" "[${MODE}]" + ;; + WARN) + printf "${cRedDull}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + ERROR) + printf "${cRedBright}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + esac +} + +# Capitalises the first letter of the message +_loggerGetMessage() { + local originalMessage="$*" + local firstChar=$(echo "${originalMessage:0:1}" | awk '{ print toupper($0) }') + local resetOfMessage="${originalMessage:1}" + echo "$firstChar$resetOfMessage" +} + +# The spec also says content should be left-trimmed but this is not necessary in our case. We don't reach the limit. 
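As a quick, illustrative reading of the logger helpers above (the sample message is invented for the example):

    # capitalises only the first character of the message
    _loggerGetMessage "starting migration checks"    # prints: Starting migration checks
    # pads the level into a fixed-width, colourised field such as "[INFO ]"
    _loggerGetMode "INFO"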
+_loggerGetStackTrace() { + printf "%s%-30s%s" "[" "$1:$2" "]" +} + +_loggerGetThread() { + printf "%s" "[main]" +} + +_loggerGetServiceType() { + printf "%s%-5s%s" "[" "shell" "]" +} + +#Trace ID is not applicable to scripts +_loggerGetTraceID() { + printf "%s" "[]" +} + +logRaw() { + echo "" + printf "$1" + echo "" +} + +logBold(){ + echo "" + printf "${cBold}$1${cClear}" + echo "" +} + +# The date binary works differently based on whether it is GNU/BSD +is_date_supported=0 +date --version > /dev/null 2>&1 || is_date_supported=1 +IS_GNU=$(echo $is_date_supported) + +_loggerGetTimestamp() { + if [ "${IS_GNU}" == "0" ]; then + echo -n $(date -u +%FT%T.%3NZ) + else + echo -n $(date -u +%FT%T.000Z) + fi +} + +# https://www.shellscript.sh/tips/spinner/ +_spin() +{ + spinner="/|\\-/|\\-" + while : + do + for i in `seq 0 7` + do + echo -n "${spinner:$i:1}" + echo -en "\010" + sleep 1 + done + done +} + +showSpinner() { + # Start the Spinner: + _spin & + # Make a note of its Process ID (PID): + SPIN_PID=$! + # Kill the spinner on any signal, including our own exit. + trap "kill -9 $SPIN_PID" `seq 0 15` &> /dev/null || return 0 +} + +stopSpinner() { + local occurrences=$(ps -ef | grep -wc "${SPIN_PID}") + let "occurrences+=0" + # validate that it is present (2 since this search itself will show up in the results) + if [ $occurrences -gt 1 ]; then + kill -9 $SPIN_PID &>/dev/null || return 0 + wait $SPIN_ID &>/dev/null + fi +} + +_getEffectiveMessage(){ + local MESSAGE="$1" + local MODE=${2-"INFO"} + + if [ -z "$CONTEXT" ]; then + CONTEXT=$(caller) + fi + + _EFFECTIVE_MESSAGE= + if [ -z "$LOG_BEHAVIOR_ADD_META" ]; then + _EFFECTIVE_MESSAGE="$(_loggerGetModeRaw $MODE)$(_loggerGetMessage $MESSAGE)" + else + local SERVICE_TYPE="script" + local TRACE_ID="" + local THREAD="main" + + local CONTEXT_LINE=$(echo "$CONTEXT" | awk '{print $1}') + local CONTEXT_FILE=$(echo "$CONTEXT" | awk -F"/" '{print $NF}') + + _EFFECTIVE_MESSAGE="$(_loggerGetTimestamp) $(_loggerGetServiceType) $(_loggerGetMode $MODE) $(_loggerGetTraceID) $(_loggerGetStackTrace $CONTEXT_FILE $CONTEXT_LINE) $(_loggerGetThread) - $(_loggerGetMessage $MESSAGE)" + fi + CONTEXT= +} + +# Important - don't call any log method from this method. Will become an infinite loop. Use echo to debug +_logToFile() { + local MODE=${1-"INFO"} + local targetFile="$LOG_BEHAVIOR_ADD_REDIRECTION" + # IF the file isn't passed, abort + if [ -z "$targetFile" ]; then + return + fi + # IF this is not being run in verbose mode and mode is debug or lower, abort + if [ "${VERBOSE_MODE}" != "$FLAG_Y" ] && [ "${VERBOSE_MODE}" != "true" ] && [ "${VERBOSE_MODE}" != "debug" ]; then + if [ "$MODE" == "DEBUG" ] || [ "$MODE" == "SILLY" ]; then + return + fi + fi + + # Create the file if it doesn't exist + if [ ! 
-f "${targetFile}" ]; then + return + # touch $targetFile > /dev/null 2>&1 || true + fi + # # Make it readable + # chmod 640 $targetFile > /dev/null 2>&1 || true + + # Log contents + printf "%s\n" "$_EFFECTIVE_MESSAGE" >> "$targetFile" || true +} + +logger() { + if [ "$LOG_BEHAVIOR_ADD_NEW_LINE" == "$FLAG_Y" ]; then + echo "" + fi + _getEffectiveMessage "$@" + local MODE=${2-"INFO"} + printf "%s\n" "$_EFFECTIVE_MESSAGE" + _logToFile "$MODE" +} + +logDebug(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "$FLAG_Y" ] || [ "${VERBOSE_MODE}" == "true" ] || [ "${VERBOSE_MODE}" == "debug" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logSilly(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "silly" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logError() { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= +} + +errorExit () { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= + exit 1 +} + +warn () { + CONTEXT=$(caller) + logger "$1" "WARN" + CONTEXT= +} + +note () { + CONTEXT=$(caller) + logger "$1" "NOTE" + CONTEXT= +} + +bannerStart() { + title=$1 + echo + echo -e "\033[1m${title}\033[0m" + echo +} + +bannerSection() { + title=$1 + echo + echo -e "******************************** ${title} ********************************" + echo +} + +bannerSubSection() { + title=$1 + echo + echo -e "************** ${title} *******************" + echo +} + +bannerMessge() { + title=$1 + echo + echo -e "********************************" + echo -e "${title}" + echo -e "********************************" + echo +} + +setRed () { + local input="$1" + echo -e \\033[31m${input}\\033[0m +} +setGreen () { + local input="$1" + echo -e \\033[32m${input}\\033[0m +} +setYellow () { + local input="$1" + echo -e \\033[33m${input}\\033[0m +} + +logger_addLinebreak () { + echo -e "---\n" +} + +bannerImportant() { + title=$1 + local bold="\033[1m" + local noColour="\033[0m" + echo + echo -e "${bold}######################################## IMPORTANT ########################################${noColour}" + echo -e "${bold}${title}${noColour}" + echo -e "${bold}###########################################################################################${noColour}" + echo +} + +bannerEnd() { + #TODO pass a title and calculate length dynamically so that start and end look alike + echo + echo "*****************************************************************************" + echo +} + +banner() { + title=$1 + content=$2 + bannerStart "${title}" + echo -e "$content" +} + +# The logic below helps us redirect content we'd normally hide to the log file. + # + # We have several commands which clutter the console with output and so use + # `cmd > /dev/null` - this redirects the command's output to null. + # + # However, the information we just hid maybe useful for support. Using the code pattern + # `cmd >&6` (instead of `cmd> >/dev/null` ), the command's output is hidden from the console + # but redirected to the installation log file + # + +#Default value of 6 is just null +exec 6>>/dev/null +redirectLogsToFile() { + echo "" + # local file=$1 + + # [ ! -z "${file}" ] || return 0 + + # local logDir=$(dirname "$file") + + # if [ ! 
-f "${file}" ]; then + # [ -d "${logDir}" ] || mkdir -p ${logDir} || \ + # ( echo "WARNING : Could not create parent directory (${logDir}) to redirect console log : ${file}" ; return 0 ) + # fi + + # #6 now points to the log file + # exec 6>>${file} + # #reference https://unix.stackexchange.com/questions/145651/using-exec-and-tee-to-redirect-logs-to-stdout-and-a-log-file-in-the-same-time + # exec 2>&1 > >(tee -a "${file}") +} + +# Check if a give key contains any sensitive string as part of it +# Based on the result, the caller can decide its value can be displayed or not +# Sample usage : isKeySensitive "${key}" && displayValue="******" || displayValue=${value} +isKeySensitive(){ + local key=$1 + local sensitiveKeys="password|secret|key|token" + + if [ -z "${key}" ]; then + return 1 + else + local lowercaseKey=$(echo "${key}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + [[ "${lowercaseKey}" =~ ${sensitiveKeys} ]] && return 0 || return 1 + fi +} + +getPrintableValueOfKey(){ + local displayValue= + local key="$1" + if [ -z "$key" ]; then + # This is actually an incorrect usage of this method but any logging will cause unexpected content in the caller + echo -n "" + return + fi + + local value="$2" + isKeySensitive "${key}" && displayValue="$SENSITIVE_KEY_VALUE" || displayValue="${value}" + echo -n $displayValue +} + +_createConsoleLog(){ + if [ -z "${JF_PRODUCT_HOME}" ]; then + return + fi + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + mkdir -p "${JF_PRODUCT_HOME}/var/log" || true + if [ ! -f ${targetFile} ]; then + touch $targetFile > /dev/null 2>&1 || true + fi + chmod 640 $targetFile > /dev/null 2>&1 || true +} + +# Output from application's logs are piped to this method. It checks a configuration variable to determine if content should be logged to +# the common console.log file +redirectServiceLogsToFile() { + + local result="0" + # check if the function getSystemValue exists + LC_ALL=C type getSystemValue > /dev/null 2>&1 || result="$?" + if [[ "$result" != "0" ]]; then + warn "Couldn't find the systemYamlHelper. Skipping log redirection" + return 0 + fi + + getSystemValue "shared.consoleLog" "NOT_SET" + if [[ "${YAML_VALUE}" == "false" ]]; then + logger "Redirection is set to false. Skipping log redirection" + return 0; + fi + + if [ -z "${JF_PRODUCT_HOME}" ] || [ "${JF_PRODUCT_HOME}" == "" ]; then + warn "JF_PRODUCT_HOME is unavailable. 
Skipping log redirection" + return 0 + fi + + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + + _createConsoleLog + + while read -r line; do + printf '%s\n' "${line}" >> $targetFile || return 0 # Don't want to log anything - might clutter the screen + done +} + +## Display environment variables starting with JF_ along with its value +## Value of sensitive keys will be displayed as "******" +## +## Sample Display : +## +## ======================== +## JF Environment variables +## ======================== +## +## JF_SHARED_NODE_ID : locahost +## JF_SHARED_JOINKEY : ****** +## +## +displayEnv() { + local JFEnv=$(printenv | grep ^JF_ 2>/dev/null) + local key= + local value= + + if [ -z "${JFEnv}" ]; then + return + fi + + cat << ENV_START_MESSAGE + +======================== +JF Environment variables +======================== +ENV_START_MESSAGE + + for entry in ${JFEnv}; do + key=$(echo "${entry}" | awk -F'=' '{print $1}') + value=$(echo "${entry}" | awk -F'=' '{print $2}') + + isKeySensitive "${key}" && value="******" || value=${value} + + printf "\n%-35s%s" "${key}" " : ${value}" + done + echo; +} + +_addLogRotateConfiguration() { + logDebug "Method ${FUNCNAME[0]}" + # mandatory inputs + local confFile="$1" + local logFile="$2" + + # Method available in _ioOperations.sh + LC_ALL=C type io_setYQPath > /dev/null 2>&1 || return 1 + + io_setYQPath + + # Method available in _systemYamlHelper.sh + LC_ALL=C type getSystemValue > /dev/null 2>&1 || return 1 + + local frequency="daily" + local archiveFolder="archived" + + local compressLogFiles= + getSystemValue "shared.logging.rotation.compress" "true" + if [[ "${YAML_VALUE}" == "true" ]]; then + compressLogFiles="compress" + fi + + getSystemValue "shared.logging.rotation.maxFiles" "10" + local noOfBackupFiles="${YAML_VALUE}" + + getSystemValue "shared.logging.rotation.maxSizeMb" "25" + local sizeOfFile="${YAML_VALUE}M" + + logDebug "Adding logrotate configuration for [$logFile] to [$confFile]" + + # Add configuration to file + local confContent=$(cat << LOGROTATECONF +$logFile { + $frequency + missingok + rotate $noOfBackupFiles + $compressLogFiles + notifempty + olddir $archiveFolder + dateext + extension .log + dateformat -%Y-%m-%d + size ${sizeOfFile} +} +LOGROTATECONF +) + echo "${confContent}" > ${confFile} || return 1 +} + +_operationIsBySameUser() { + local targetUser="$1" + local currentUserID=$(id -u) + local currentUserName=$(id -un) + + if [ $currentUserID == $targetUser ] || [ $currentUserName == $targetUser ]; then + echo -n "yes" + else + echo -n "no" + fi +} + +_addCronJobForLogrotate() { + logDebug "Method ${FUNCNAME[0]}" + + # Abort if logrotate is not available + [ "$(io_commandExists 'crontab')" != "yes" ] && warn "cron is not available" && return 1 + + # mandatory inputs + local productHome="$1" + local confFile="$2" + local cronJobOwner="$3" + + # We want to use our binary if possible. It may be more recent than the one in the OS + local logrotateBinary="$productHome/app/third-party/logrotate/logrotate" + + if [ ! -f "$logrotateBinary" ]; then + logrotateBinary="logrotate" + [ "$(io_commandExists 'logrotate')" != "yes" ] && warn "logrotate is not available" && return 1 + fi + local cmd="$logrotateBinary ${confFile} --state $productHome/var/etc/logrotate/logrotate-state" #--verbose + + id -u $cronJobOwner > /dev/null 2>&1 || { warn "User $cronJobOwner does not exist. 
Aborting logrotate configuration" && return 1; } + + # Remove the existing line + removeLogRotation "$productHome" "$cronJobOwner" || true + + # Run logrotate daily at 23:55 hours + local cronInterval="55 23 * * * $cmd" + + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + # If this is standalone mode, we cannot use -u - the user running this process may not have the necessary privileges + if [ "$standaloneMode" == "no" ]; then + (crontab -l -u $cronJobOwner 2>/dev/null; echo "$cronInterval") | crontab -u $cronJobOwner - + else + (crontab -l 2>/dev/null; echo "$cronInterval") | crontab - + fi +} + +## Configure logrotate for a product +## Failure conditions: +## If logrotation could not be setup for some reason +## Parameters: +## $1: The product name +## $2: The product home +## Depends on global: none +## Updates global: none +## Returns: NA + +configureLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + + # mandatory inputs + local productName="$1" + if [ -z $productName ]; then + warn "Incorrect usage. A product name is necessary for configuring log rotation" && return 1 + fi + + local productHome="$2" + if [ -z $productHome ]; then + warn "Incorrect usage. A product home folder is necessary for configuring log rotation" && return 1 + fi + + local logFile="${productHome}/var/log/console.log" + if [[ $(uname) == "Darwin" ]]; then + logger "Log rotation for [$logFile] has not been configured. Please setup manually" + return 0 + fi + + local userID="$3" + if [ -z $userID ]; then + warn "Incorrect usage. A userID is necessary for configuring log rotation" && return 1 + fi + + local groupID=${4:-$userID} + local logConfigOwner=${5:-$userID} + + logDebug "Configuring log rotation as user [$userID], group [$groupID], effective cron User [$logConfigOwner]" + + local errorMessage="Could not configure logrotate. Please configure log rotation of the file: [$logFile] manually" + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + # TODO move to recursive method + createDir "${productHome}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log/archived" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + + # TODO move to recursive method + createDir "${productHome}/var/etc" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/etc/logrotate" "$logConfigOwner" || { warn "${errorMessage}" && return 1; } + + # conf file should be owned by the user running the script + createFile "${confFile}" "${logConfigOwner}" || { warn "Could not create configuration file [$confFile]" return 1; } + + _addLogRotateConfiguration "${confFile}" "${logFile}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + _addCronJobForLogrotate "${productHome}" "${confFile}" "${logConfigOwner}" || { warn "${errorMessage}" && return 1; } +} + +_pauseExecution() { + if [ "${VERBOSE_MODE}" == "debug" ]; then + + local breakPoint="$1" + if [ ! 
-z "$breakPoint" ]; then + printf "${cBlue}Breakpoint${cClear} [$breakPoint] " + echo "" + fi + printf "${cBlue}Press enter once you are ready to continue${cClear}" + read -s choice + echo "" + fi +} + +# removeLogRotation "$productHome" "$cronJobOwner" || true +removeLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + if [[ $(uname) == "Darwin" ]]; then + logDebug "Not implemented for Darwin." + return 0 + fi + local productHome="$1" + local cronJobOwner="$2" + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + if [ "$standaloneMode" == "no" ]; then + crontab -l -u $cronJobOwner 2>/dev/null | grep -v "$confFile" | crontab -u $cronJobOwner - + else + crontab -l 2>/dev/null | grep -v "$confFile" | crontab - + fi +} + +# NOTE: This method does not check the configuration to see if redirection is necessary. +# This is intentional. If we don't redirect, tomcat logs might get redirected to a folder/file +# that does not exist, causing the service itself to not start +setupTomcatRedirection() { + logDebug "Method ${FUNCNAME[0]}" + local consoleLog="${JF_PRODUCT_HOME}/var/log/console.log" + _createConsoleLog + export CATALINA_OUT="${consoleLog}" +} + +setupScriptLogsRedirection() { + logDebug "Method ${FUNCNAME[0]}" + if [ -z "${JF_PRODUCT_HOME}" ]; then + logDebug "No JF_PRODUCT_HOME. Returning" + return + fi + # Create the console.log file if it is not already present + # _createConsoleLog || true + # # Ensure any logs (logger/logError/warn) also get redirected to the console.log + # # Using installer.log as a temparory fix. Please change this to console.log once INST-291 is fixed + export LOG_BEHAVIOR_ADD_REDIRECTION="${JF_PRODUCT_HOME}/var/log/console.log" + export LOG_BEHAVIOR_ADD_META="$FLAG_Y" +} + +# Returns Y if this method is run inside a container +isRunningInsideAContainer() { + if [ -f "/.dockerenv" ]; then + echo -n "$FLAG_Y" + else + echo -n "$FLAG_N" + fi +} + +POSTGRES_USER=999 +NGINX_USER=104 +NGINX_GROUP=107 +ES_USER=1000 +REDIS_USER=999 +MONGO_USER=999 +RABBITMQ_USER=999 +LOG_FILE_PERMISSION=640 +PID_FILE_PERMISSION=644 + +# Copy file +copyFile(){ + local source=$1 + local target=$2 + local mode=${3:-overwrite} + local enableVerbose=${4:-"${FLAG_N}"} + local verboseFlag="" + + if [ ! -z "${enableVerbose}" ] && [ "${enableVerbose}" == "${FLAG_Y}" ]; then + verboseFlag="-v" + fi + + if [[ ! 
( $source && $target ) ]]; then + warn "Source and target is mandatory to copy file" + return 1 + fi + + if [[ -f "${target}" ]]; then + [[ "$mode" = "overwrite" ]] && ( cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}") || true + else + cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}" + fi +} + +# Copy files recursively from given source directory to destination directory +# This method wil copy but will NOT overwrite +# Destination will be created if its not available +copyFilesNoOverwrite(){ + local src=$1 + local dest=$2 + local enableVerboseCopy="${3:-${FLAG_Y}}" + + if [[ -z "${src}" || -z "${dest}" ]]; then + return + fi + + if [ -d "${src}" ] && [ "$(ls -A ${src})" ]; then + local relativeFilePath="" + local targetFilePath="" + + for file in $(find ${src} -type f 2>/dev/null) ; do + # Derive relative path and attach it to destination + # Example : + # src=/extra_config + # dest=/var/opt/jfrog/artifactory/etc + # file=/extra_config/config.xml + # relativeFilePath=config.xml + # targetFilePath=/var/opt/jfrog/artifactory/etc/config.xml + relativeFilePath=${file/${src}/} + targetFilePath=${dest}${relativeFilePath} + + createDir "$(dirname "$targetFilePath")" + copyFile "${file}" "${targetFilePath}" "no_overwrite" "${enableVerboseCopy}" + done + fi +} + +# TODO : WINDOWS ? +# Check the max open files and open processes set on the system +checkULimits () { + local minMaxOpenFiles=${1:-32000} + local minMaxOpenProcesses=${2:-1024} + local setValue=${3:-true} + local warningMsgForFiles=${4} + local warningMsgForProcesses=${5} + + logger "Checking open files and processes limits" + + local currentMaxOpenFiles=$(ulimit -n) + logger "Current max open files is $currentMaxOpenFiles" + if [ ${currentMaxOpenFiles} != "unlimited" ] && [ "$currentMaxOpenFiles" -lt "$minMaxOpenFiles" ]; then + if [ "${setValue}" ]; then + ulimit -n "${minMaxOpenFiles}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForFiles}" ] || warn "${warningMsgForFiles}" + else + errorExit "Max number of open files $currentMaxOpenFiles, is too low. Cannot run the application!" + fi + fi + + local currentMaxOpenProcesses=$(ulimit -u) + logger "Current max open processes is $currentMaxOpenProcesses" + if [ "$currentMaxOpenProcesses" != "unlimited" ] && [ "$currentMaxOpenProcesses" -lt "$minMaxOpenProcesses" ]; then + if [ "${setValue}" ]; then + ulimit -u "${minMaxOpenProcesses}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForProcesses}" ] || warn "${warningMsgForProcesses}" + else + errorExit "Max number of open files $currentMaxOpenProcesses, is too low. Cannot run the application!" + fi + fi +} + +createDirs() { + local appDataDir=$1 + local serviceName=$2 + local folders="backup bootstrap data etc logs work" + + [ -z "${appDataDir}" ] && errorExit "An application directory is mandatory to create its data structure" || true + [ -z "${serviceName}" ] && errorExit "A service name is mandatory to create service data structure" || true + + for folder in ${folders} + do + folder=${appDataDir}/${folder}/${serviceName} + if [ ! 
-d "${folder}" ]; then + logger "Creating folder : ${folder}" + mkdir -p "${folder}" || errorExit "Failed to create ${folder}" + fi + done +} + + +testReadWritePermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local test_file=${dir_to_check}/test-permissions + + # Write file + if echo test > ${test_file} 1> /dev/null 2>&1; then + # Write succeeded. Testing read... + if cat ${test_file} > /dev/null; then + rm -f ${test_file} + else + error=true + fi + else + error=true + fi + + if [ ${error} == true ]; then + return 1 + else + return 0 + fi +} + +# Test directory has read/write permissions for current user +testDirectoryPermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local u_id=$(id -u) + local id_str="id ${u_id}" + + logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}" + + if ! testReadWritePermissions ${dir_to_check}; then + error=true + fi + + if [ "${error}" == true ]; then + local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check}) + logger "###########################################################" + logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}" + logger "${stat_data}" + logger "Mounted directory must have read/write permissions for user ${id_str}" + logger "###########################################################" + errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}" + fi + logger "Permissions for ${dir_to_check} are good" +} + +# Utility method to create a directory path recursively with chown feature as +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: Root directory from where the path can be created +## $2: List of recursive child directories seperated by space +## $3: user who should own the directory. Optional +## $4: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA +# +# Usage: +# createRecursiveDir "/opt/jfrog/product/var" "bootstrap tomcat lib" "user_name" "group_name" +createRecursiveDir(){ + local rootDir=$1 + local pathDirs=$2 + local user=$3 + local group=${4:-${user}} + local fullPath= + + [ ! -z "${rootDir}" ] || return 0 + + createDir "${rootDir}" "${user}" "${group}" + + [ ! -z "${pathDirs}" ] || return 0 + + fullPath=${rootDir} + + for dir in ${pathDirs}; do + fullPath=${fullPath}/${dir} + createDir "${fullPath}" "${user}" "${group}" + done +} + +# Utility method to create a directory +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: directory to create +## $2: user who should own the directory. Optional +## $3: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA + +createDir(){ + local dirName="$1" + local printMessage=no + logSilly "Method ${FUNCNAME[0]} invoked with [$dirName]" + [ -z "${dirName}" ] && return + + logDebug "Attempting to create ${dirName}" + mkdir -p "${dirName}" || errorExit "Unable to create directory: [${dirName}]" + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + # Earlier, this line would have returned 1 if it failed. Now it just warns. + # This is intentional. 
Earlier, this line would NOT be reached if the folder already existed. + # Since it will always come to this line and the script may be running as a non-root user, this method will just warn if + # setting permissions fails (so as to not affect any existing flows) + io_setOwnershipNonRecursive "$dirName" "$userID" "$groupID" || warn "Could not set owner of [$dirName] to [$userID:$groupID]" + fi + # logging message to print created dir with user and group + local logMessage=${4:-$printMessage} + if [[ "${logMessage}" == "yes" ]]; then + logger "Successfully created directory [${dirName}]. Owner: [${userID}:${groupID}]" + fi +} + +removeSoftLinkAndCreateDir () { + local dirName="$1" + local userID="$2" + local groupID="$3" + local logMessage="$4" + removeSoftLink "${dirName}" + createDir "${dirName}" "${userID}" "${groupID}" "${logMessage}" +} + +# Utility method to remove a soft link +removeSoftLink () { + local dirName="$1" + if [[ -L "${dirName}" ]]; then + targetLink=$(readlink -f "${dirName}") + logger "Removing the symlink [${dirName}] pointing to [${targetLink}]" + rm -f "${dirName}" + fi +} + +# Check Directory exist in the path +checkDirExists () { + local directoryPath="$1" + + [[ -d "${directoryPath}" ]] && echo -n "true" || echo -n "false" +} + + +# Utility method to create a file +# Failure conditions: +# Parameters: +## $1: file to create +# Depends on global: none +# Updates global: none +# Returns: NA + +createFile(){ + local fileName="$1" + logSilly "Method ${FUNCNAME[0]} [$fileName]" + [ -f "${fileName}" ] && return 0 + touch "${fileName}" || return 1 + + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + io_setOwnership "$fileName" "$userID" "$groupID" || return 1 + fi +} + +# Check File exist in the filePath +# IMPORTANT- DON'T ADD LOGGING to this method +checkFileExists () { + local filePath="$1" + + [[ -f "${filePath}" ]] && echo -n "true" || echo -n "false" +} + +# Check for directories contains any (files or sub directories) +# IMPORTANT- DON'T ADD LOGGING to this method +checkDirContents () { + local directoryPath="$1" + if [[ "$(ls -1 "${directoryPath}" | wc -l)" -gt 0 ]]; then + echo -n "true" + else + echo -n "false" + fi +} + +# Check contents exist in directory +# IMPORTANT- DON'T ADD LOGGING to this method +checkContentExists () { + local source="$1" + + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + echo -n "false" + else + echo -n "true" + fi +} + +# Resolve the variable +# IMPORTANT- DON'T ADD LOGGING to this method +evalVariable () { + local output="$1" + local input="$2" + + eval "${output}"=\${"${input}"} + eval echo \${"${output}"} +} + +# Usage: if [ "$(io_commandExists 'curl')" == "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_commandExists() { + local commandToExecute="$1" + hash "${commandToExecute}" 2>/dev/null + local rt=$? 
+ if [ "$rt" == 0 ]; then echo -n "yes"; else echo -n "no"; fi +} + +# Usage: if [ "$(io_curlExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_curlExists() { + io_commandExists "curl" +} + + +io_hasMatch() { + logSilly "Method ${FUNCNAME[0]}" + local result=0 + logDebug "Executing [echo \"$1\" | grep \"$2\" >/dev/null 2>&1]" + echo "$1" | grep "$2" >/dev/null 2>&1 || result=1 + return $result +} + +# Utility method to check if the string passed (usually a connection url) corresponds to this machine itself +# Failure conditions: None +# Parameters: +## $1: string to check against +# Depends on global: none +# Updates global: IS_LOCALHOST with value "yes/no" +# Returns: NA + +io_getIsLocalhost() { + logSilly "Method ${FUNCNAME[0]}" + IS_LOCALHOST="$FLAG_N" + local inputString="$1" + logDebug "Parsing [$inputString] to check if we are dealing with this machine itself" + + io_hasMatch "$inputString" "localhost" && { + logDebug "Found localhost. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for localhost" + + local hostIP=$(io_getPublicHostIP) + io_hasMatch "$inputString" "$hostIP" && { + logDebug "Found $hostIP. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostIP" + + local hostID=$(io_getPublicHostID) + io_hasMatch "$inputString" "$hostID" && { + logDebug "Found $hostID. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostID" + + local hostName=$(io_getPublicHostName) + io_hasMatch "$inputString" "$hostName" && { + logDebug "Found $hostName. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostName" + +} + +# Usage: if [ "$(io_tarExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_tarExists() { + io_commandExists "tar" +} + +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostIP() { + local OS_TYPE=$(uname) + local publicHostIP= + if [ "${OS_TYPE}" == "Darwin" ]; then + ipStatus=$(ifconfig en0 | grep "status" | awk '{print$2}') + if [ "${ipStatus}" == "active" ]; then + publicHostIP=$(ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}') + else + errorExit "Host IP could not be resolved!" + fi + elif [ "${OS_TYPE}" == "Linux" ]; then + publicHostIP=$(hostname -i 2>/dev/null || echo "127.0.0.1") + fi + publicHostIP=$(echo "${publicHostIP}" | awk '{print $1}') + echo -n "${publicHostIP}" +} + +# Will return the short host name (up to the first dot) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostName() { + echo -n "$(hostname -s)" +} + +# Will return the full host name (use this as much as possible) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostID() { + echo -n "$(hostname)" +} + +# Utility method to backup a file +# Failure conditions: NA +# Parameters: filePath +# Depends on global: none, +# Updates global: none +# Returns: NA +io_backupFile() { + logSilly "Method ${FUNCNAME[0]}" + fileName="$1" + if [ ! 
-f "${filePath}" ]; then + logDebug "No file: [${filePath}] to backup" + return + fi + dateTime=$(date +"%Y-%m-%d-%H-%M-%S") + targetFileName="${fileName}.backup.${dateTime}" + yes | \cp -f "$fileName" "${targetFileName}" + logger "File [${fileName}] backedup as [${targetFileName}]" +} + +# Reference https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash/4025065#4025065 +is_number() { + case "$BASH_VERSION" in + 3.1.*) + PATTERN='\^\[0-9\]+\$' + ;; + *) + PATTERN='^[0-9]+$' + ;; + esac + + [[ "$1" =~ $PATTERN ]] +} + +io_compareVersions() { + if [[ $# != 2 ]] + then + echo "Usage: min_version current minimum" + return + fi + + A="${1%%.*}" + B="${2%%.*}" + + if [[ "$A" != "$1" && "$B" != "$2" && "$A" == "$B" ]] + then + io_compareVersions "${1#*.}" "${2#*.}" + else + if is_number "$A" && is_number "$B" + then + if [[ "$A" -eq "$B" ]]; then + echo "0" + elif [[ "$A" -gt "$B" ]]; then + echo "1" + elif [[ "$A" -lt "$B" ]]; then + echo "-1" + fi + fi + fi +} + +# Reference https://stackoverflow.com/questions/369758/how-to-trim-whitespace-from-a-bash-variable +# Strip all leading and trailing spaces +# IMPORTANT- DON'T ADD LOGGING to this method +io_trim() { + local var="$1" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +# temporary function will be removing it ASAP +# search for string and replace text in file +replaceText_migration_hook () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + fi +} + +# search for string and replace text in file +replaceText () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + logDebug "Replaced [$regexString] with [$replaceText] in [$file]" + fi +} + +# search for string and prepend text in file +prependText () { + local regexString="$1" + local text="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + else + sed -i -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + fi +} + +# add text to beginning of the file +addText () { + local text="$1" + local file="$2" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + else + sed -i -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + fi +} + +io_replaceString () { + local value="$1" + local firstString="$2" + local secondString="$3" + local separator=${4:-"/"} + local updateValue= + if [[ 
$(uname) == "Darwin" ]]; then + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + else + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + fi + echo -n "${updateValue}" +} + +_findYQ() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + local parentDir="$1" + if [ -z "$parentDir" ]; then + return + fi + logDebug "Executing command [find "${parentDir}" -name third-party -type d]" + local yq=$(find "${parentDir}" -name third-party -type d) + if [ -d "${yq}/yq" ]; then + export YQ_PATH="${yq}/yq" + fi +} + + +io_setYQPath() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + if [ "$(io_commandExists 'yq')" == "yes" ]; then + return + fi + + if [ ! -z "${JF_PRODUCT_HOME}" ] && [ -d "${JF_PRODUCT_HOME}" ]; then + _findYQ "${JF_PRODUCT_HOME}" + fi + + if [ -z "${YQ_PATH}" ] && [ ! -z "${COMPOSE_HOME}" ] && [ -d "${COMPOSE_HOME}" ]; then + _findYQ "${COMPOSE_HOME}" + fi + # TODO We can remove this block after all the code is restructured. + if [ -z "${YQ_PATH}" ] && [ ! -z "${SCRIPT_HOME}" ] && [ -d "${SCRIPT_HOME}" ]; then + _findYQ "${SCRIPT_HOME}" + fi + +} + +io_getLinuxDistribution() { + LINUX_DISTRIBUTION= + + # Make sure running on Linux + [ $(uname -s) != "Linux" ] && return + + # Find out what Linux distribution we are on + + cat /etc/*-release | grep -i Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 6.x + cat /etc/issue.net | grep Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 7.x + cat /etc/*-release | grep -i centos >/dev/null 2>&1 && LINUX_DISTRIBUTION=CentOS && LINUX_DISTRIBUTION_VER="7" || true + + # OS 8.x + grep -q -i "release 8" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="8" || true + + # OS 7.x + grep -q -i "release 7" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="7" || true + + # OS 6.x + grep -q -i "release 6" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="6" || true + + cat /etc/*-release | grep -i Red | grep -i 'VERSION=7' >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat && LINUX_DISTRIBUTION_VER="7" || true + + cat /etc/*-release | grep -i debian >/dev/null 2>&1 && LINUX_DISTRIBUTION=Debian || true + + cat /etc/*-release | grep -i ubuntu >/dev/null 2>&1 && LINUX_DISTRIBUTION=Ubuntu || true +} + +## Utility method to check ownership of folders/files +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If file is not owned by the user & group +## Parameters: + ## user + ## group + ## folder to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac +io_checkOwner () { + logSilly "Method ${FUNCNAME[0]}" + local osType=$(uname) + + if [ "${osType}" != "Linux" ]; then + logDebug "Unsupported OS. Skipping check" + return 0 + fi + + local file_to_check=$1 + local user_id_to_check=$2 + + + if [ -z "$user_id_to_check" ] || [ -z "$file_to_check" ]; then + errorExit "Invalid invocation of method. 
Missing mandatory inputs" + fi + + local group_id_to_check=${3:-$user_id_to_check} + local check_user_name=${4:-"no"} + + logDebug "Checking permissions on [$file_to_check] for user [$user_id_to_check] & group [$group_id_to_check]" + + local stat= + + if [ "${check_user_name}" == "yes" ]; then + stat=( $(stat -Lc "%U %G" ${file_to_check}) ) + else + stat=( $(stat -Lc "%u %g" ${file_to_check}) ) + fi + + local user_id=${stat[0]} + local group_id=${stat[1]} + + if [[ "${user_id}" != "${user_id_to_check}" ]] || [[ "${group_id}" != "${group_id_to_check}" ]] ; then + logDebug "Ownership mismatch. [${file_to_check}] is not owned by [${user_id_to_check}:${group_id_to_check}]" + return 1 + else + return 0 + fi +} + +## Utility method to change ownership of a file/folder - NON recursive +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnershipNonRecursive() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown ${user}:${group} ${targetFile}]" + chown ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to change ownership of a file. +## IMPORTANT +## If being called on a folder, should ONLY be called for fresh folders or may cause performance issues +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnership() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown -R ${user}:${group} ${targetFile}]" + chown -R ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to create third party folder structure necessary for Postgres +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## POSTGRESQL_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createPostgresDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${POSTGRESQL_DATA_ROOT}" ] && return 0 + + logDebug "Property [${POSTGRESQL_DATA_ROOT}] exists. Proceeding" + + createDir "${POSTGRESQL_DATA_ROOT}/data" + io_setOwnership "${POSTGRESQL_DATA_ROOT}" "${POSTGRES_USER}" "${POSTGRES_USER}" || errorExit "Setting ownership of [${POSTGRESQL_DATA_ROOT}] to [${POSTGRES_USER}:${POSTGRES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Nginx +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## NGINX_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createNginxDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${NGINX_DATA_ROOT}" ] && return 0 + + logDebug "Property [${NGINX_DATA_ROOT}] exists. 
Proceeding" + + createDir "${NGINX_DATA_ROOT}" + io_setOwnership "${NGINX_DATA_ROOT}" "${NGINX_USER}" "${NGINX_GROUP}" || errorExit "Setting ownership of [${NGINX_DATA_ROOT}] to [${NGINX_USER}:${NGINX_GROUP}] failed" +} + +## Utility method to create third party folder structure necessary for ElasticSearch +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## ELASTIC_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createElasticSearchDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${ELASTIC_DATA_ROOT}" ] && return 0 + + logDebug "Property [${ELASTIC_DATA_ROOT}] exists. Proceeding" + + createDir "${ELASTIC_DATA_ROOT}/data" + io_setOwnership "${ELASTIC_DATA_ROOT}" "${ES_USER}" "${ES_USER}" || errorExit "Setting ownership of [${ELASTIC_DATA_ROOT}] to [${ES_USER}:${ES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Redis +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## REDIS_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRedisDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${REDIS_DATA_ROOT}" ] && return 0 + + logDebug "Property [${REDIS_DATA_ROOT}] exists. Proceeding" + + createDir "${REDIS_DATA_ROOT}" + io_setOwnership "${REDIS_DATA_ROOT}" "${REDIS_USER}" "${REDIS_USER}" || errorExit "Setting ownership of [${REDIS_DATA_ROOT}] to [${REDIS_USER}:${REDIS_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Mongo +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## MONGODB_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createMongoDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${MONGODB_DATA_ROOT}" ] && return 0 + + logDebug "Property [${MONGODB_DATA_ROOT}] exists. Proceeding" + + createDir "${MONGODB_DATA_ROOT}/logs" + createDir "${MONGODB_DATA_ROOT}/configdb" + createDir "${MONGODB_DATA_ROOT}/db" + io_setOwnership "${MONGODB_DATA_ROOT}" "${MONGO_USER}" "${MONGO_USER}" || errorExit "Setting ownership of [${MONGODB_DATA_ROOT}] to [${MONGO_USER}:${MONGO_USER}] failed" +} + +## Utility method to create third party folder structure necessary for RabbitMQ +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## RABBITMQ_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRabbitMQDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${RABBITMQ_DATA_ROOT}" ] && return 0 + + logDebug "Property [${RABBITMQ_DATA_ROOT}] exists. 
Proceeding" + + createDir "${RABBITMQ_DATA_ROOT}" + io_setOwnership "${RABBITMQ_DATA_ROOT}" "${RABBITMQ_USER}" "${RABBITMQ_USER}" || errorExit "Setting ownership of [${RABBITMQ_DATA_ROOT}] to [${RABBITMQ_USER}:${RABBITMQ_USER}] failed" +} + +# Add or replace a property in provided properties file +addOrReplaceProperty() { + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + local delimiter=${4:-"="} + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}\s*${delimiter}.*$" ${propertiesPath} > /dev/null 2>&1 + [ $? -ne 0 ] && echo -e "\n${propertyName}${delimiter}${propertyValue}" >> ${propertiesPath} + sed -i -e "s|^${propertyName}\s*${delimiter}.*$|${propertyName}${delimiter}${propertyValue}|g;" ${propertiesPath} +} + +# Set property only if its not set +io_setPropertyNoOverride(){ + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}:" ${propertiesPath} > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${propertyName}: ${propertyValue}" >> ${propertiesPath} || warn "Setting property ${propertyName}: ${propertyValue} in [ ${propertiesPath} ] failed" + else + logger "Skipping update of property : ${propertyName}" >&6 + fi +} + +# Add a line to a file if it doesn't already exist +addLine() { + local line_to_add=$1 + local target_file=$2 + logger "Trying to add line $1 to $2" >&6 2>&1 + cat "$target_file" | grep -F "$line_to_add" -wq >&6 2>&1 + if [ $? != 0 ]; then + logger "Line does not exist and will be added" >&6 2>&1 + echo $line_to_add >> $target_file || errorExit "Could not update $target_file" + fi +} + +# Utility method to check if a value (first paramter) exists in an array (2nd parameter) +# 1st parameter "value to find" +# 2nd parameter "The array to search in. Please pass a string with each value separated by space" +# Example: containsElement "y" "y Y n N" +containsElement () { + local searchElement=$1 + local searchArray=($2) + local found=1 + for elementInIndex in "${searchArray[@]}";do + if [[ $elementInIndex == $searchElement ]]; then + found=0 + fi + done + return $found +} + +# Utility method to get user's choice +# 1st parameter "what to ask the user" +# 2nd parameter "what choices to accept, separated by spaces" +# 3rd parameter "what is the default choice (to use if the user simply presses Enter)" +# Example 'getUserChoice "Are you feeling lucky? Punk!" "y n Y N" "y"' +getUserChoice(){ + configureLogOutput + read_timeout=${read_timeout:-0.5} + local choice="na" + local text_to_display=$1 + local choices=$2 + local default_choice=$3 + users_choice= + + until containsElement "$choice" "$choices"; do + echo "";echo ""; + sleep $read_timeout #This ensures correct placement of the question. 
+ read -p "$text_to_display :" choice + : ${choice:=$default_choice} + done + users_choice=$choice + echo -e "\n$text_to_display: $users_choice" >&6 + sleep $read_timeout #This ensures correct logging +} + +setFilePermission () { + local permission=$1 + local file=$2 + chmod "${permission}" "${file}" || warn "Setting permission ${permission} to file [ ${file} ] failed" +} + + +#setting required paths +setAppDir (){ + SCRIPT_DIR=$(dirname $0) + SCRIPT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + APP_DIR="`cd "${SCRIPT_HOME}";pwd`" +} + +ZIP_TYPE="zip" +COMPOSE_TYPE="compose" +HELM_TYPE="helm" +RPM_TYPE="rpm" +DEB_TYPE="debian" + +sourceScript () { + local file="$1" + + [ ! -z "${file}" ] || errorExit "target file is not passed to source a file" + + if [ ! -f "${file}" ]; then + errorExit "${file} file is not found" + else + source "${file}" || errorExit "Unable to source ${file}, please check if the user ${USER} has permissions to perform this action" + fi +} +# Source required helpers +initHelpers () { + local systemYamlHelper="${APP_DIR}/systemYamlHelper.sh" + local thirdPartyDir=$(find ${APP_DIR}/.. -name third-party -type d) + export YQ_PATH="${thirdPartyDir}/yq" + LIBXML2_PATH="${thirdPartyDir}/libxml2/bin/xmllint" + export LD_LIBRARY_PATH="${thirdPartyDir}/libxml2/lib" + sourceScript "${systemYamlHelper}" +} +# Check migration info yaml file available in the path +checkMigrationInfoYaml () { + + if [[ -f "${APP_DIR}/migrationHelmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationHelmInfo.yaml" + INSTALLER="${HELM_TYPE}" + elif [[ -f "${APP_DIR}/migrationZipInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationZipInfo.yaml" + INSTALLER="${ZIP_TYPE}" + elif [[ -f "${APP_DIR}/migrationRpmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationRpmInfo.yaml" + INSTALLER="${RPM_TYPE}" + elif [[ -f "${APP_DIR}/migrationDebInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationDebInfo.yaml" + INSTALLER="${DEB_TYPE}" + elif [[ -f "${APP_DIR}/migrationComposeInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationComposeInfo.yaml" + INSTALLER="${COMPOSE_TYPE}" + else + errorExit "File migration Info yaml does not exist in [${APP_DIR}]" + fi +} + +retrieveYamlValue () { + local yamlPath="$1" + local value="$2" + local output="$3" + local message="$4" + + [[ -z "${yamlPath}" ]] && errorExit "yamlPath is mandatory to get value from ${MIGRATION_SYSTEM_YAML_INFO}" + + getYamlValue "${yamlPath}" "${MIGRATION_SYSTEM_YAML_INFO}" "false" + value="${YAML_VALUE}" + if [[ -z "${value}" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "Empty value for ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + elif [[ "${output}" == "Skip" ]]; then + return + else + errorExit "${message}" + fi + fi +} + +checkEnv () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + # check Environment JF_PRODUCT_HOME is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_PRODUCT_HOME")" + if [[ -z "${NEW_DATA_DIR}" ]]; then + errorExit "Environment variable JF_PRODUCT_HOME is not set, this is required to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + getCustomDataDir_hook + NEW_DATA_DIR="${OLD_DATA_DIR}" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + else + # check Environment JF_ROOT_DATA_DIR is 
set before migration + OLD_DATA_DIR="$(evalVariable "OLD_DATA_DIR" "JF_ROOT_DATA_DIR")" + # check Environment JF_ROOT_DATA_DIR is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_ROOT_DATA_DIR")" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi + +} + +getDataDir () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}"|| "${INSTALLER}" == "${HELM_TYPE}" ]]; then + checkEnv + else + getCustomDataDir_hook + NEW_DATA_DIR="`cd "${APP_DIR}"/../../;pwd`" + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi +} + +# Retrieve Product name from MIGRATION_SYSTEM_YAML_INFO +getProduct () { + retrieveYamlValue "migration.product" "${YAML_VALUE}" "Fail" "Empty value under ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + PRODUCT="${YAML_VALUE}" + PRODUCT=$(echo "${PRODUCT}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + if [[ "${PRODUCT}" != "artifactory" && "${PRODUCT}" != "distribution" && "${PRODUCT}" != "xray" ]]; then + errorExit "migration.product in [${MIGRATION_SYSTEM_YAML_INFO}] is not correct, please set based on product as ARTIFACTORY or DISTRIBUTION" + fi + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + JF_USER="${PRODUCT}" + fi +} +# Compare product version with minProductVersion and maxProductVersion +migrateCheckVersion () { + local productVersion="$1" + local minProductVersion="$2" + local maxProductVersion="$3" + local productVersion618="6.18.0" + local unSupportedProductVersions7=("7.2.0 7.2.1") + + if [[ "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 1 ]]; then + logger "Migration not necessary. ${PRODUCT} is already ${productVersion}" + exit 11 + elif [[ "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 1 ]]; then + if [[ ("$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 1) && " ${unSupportedProductVersions7[@]} " =~ " ${CURRENT_VERSION} " ]]; then + touch /tmp/error; + errorExit "Current ${PRODUCT} version (${productVersion}) does not support migration to ${CURRENT_VERSION}" + else + bannerStart "Detected ${PRODUCT} ${productVersion}, initiating migration" + fi + else + logger "Current ${PRODUCT} ${productVersion} version is not supported for migration" + exit 1 + fi +} + +getProductVersion () { + local minProductVersion="$1" + local maxProductVersion="$2" + local newfilePath="$3" + local oldfilePath="$4" + local propertyInDocker="$5" + local property="$6" + local productVersion= + local status= + + if [[ "$INSTALLER" == "${COMPOSE_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + elif [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${propertyInDocker}" "${newfilePath}")" + status="fail" + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." 
+ exit 0 + fi + elif [[ "$INSTALLER" == "${HELM_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + else + productVersion="${CURRENT_VERSION}" + [[ -z "${productVersion}" || "${productVersion}" == "" ]] && logger "${PRODUCT} CURRENT_VERSION is not set" && exit 0 + fi + else + if [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${property}" "${newfilePath}")" + status="fail" + elif [[ -f "${oldfilePath}" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + status="success" + else + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + logger "File [${newfilePath}] not found to get current version." + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." + fi + exit 0 + fi + fi + if [[ -z "${productVersion}" || "${productVersion}" == "" ]]; then + [[ "${status}" == "success" ]] && logger "No version found in file [${oldfilePath}]." + [[ "${status}" == "fail" ]] && logger "No version found in file [${newfilePath}]." + exit 0 + fi + + migrateCheckVersion "${productVersion}" "${minProductVersion}" "${maxProductVersion}" +} + +readKey () { + local property="$1" + local file="$2" + local version= + + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! -z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + version="${value}" && check=true && break + else + check=false + fi + done < "${file}" + if [[ "${check}" == "false" ]]; then + return + fi + echo "${version}" +} + +# create Log directory +createLogDir () { + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" + fi +} + +# Creating migration log file +creationMigrateLog () { + local LOG_FILE_NAME="migration.log" + createLogDir + local MIGRATION_LOG_FILE="${NEW_DATA_DIR}/log/${LOG_FILE_NAME}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + MIGRATION_LOG_FILE="${SCRIPT_HOME}/${LOG_FILE_NAME}" + fi + touch "${MIGRATION_LOG_FILE}" + setFilePermission "${LOG_FILE_PERMISSION}" "${MIGRATION_LOG_FILE}" + exec &> >(tee -a "${MIGRATION_LOG_FILE}") +} +# Set path where system.yaml should create +setSystemYamlPath () { + SYSTEM_YAML_PATH="${NEW_DATA_DIR}/etc/system.yaml" + if [[ "${INSTALLER}" != "${HELM_TYPE}" ]]; then + logger "system.yaml will be created in path [${SYSTEM_YAML_PATH}]" + fi +} +# Create directory +createDirectory () { + local directory="$1" + local output="$2" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${directory}" + mkdir -p "${directory}" && check=true || check=false + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi + setOwnershipBasedOnInstaller "${directory}" +} + +setOwnershipBasedOnInstaller () { + local directory="$1" + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + chown -R ${USER_TO_CHECK}:${GROUP_TO_CHECK} "${directory}" || warn "Setting ownership on $directory failed" + elif [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == 
"${HELM_TYPE}" ]]; then + io_setOwnership "${directory}" "${JF_USER}" "${JF_USER}" + fi +} + +getUserAndGroup () { + local file="$1" + read uid gid <<<$(stat -c '%U %G' ${file}) + USER_TO_CHECK="${uid}" + GROUP_TO_CHECK="${gid}" +} + +# set ownership +getUserAndGroupFromFile () { + case $PRODUCT in + artifactory) + getUserAndGroup "/etc/opt/jfrog/artifactory/artifactory.properties" + ;; + distribution) + getUserAndGroup "${OLD_DATA_DIR}/etc/versions.properties" + ;; + xray) + getUserAndGroup "${OLD_DATA_DIR}/security/master.key" + ;; + esac +} + +# creating required directories +createRequiredDirs () { + bannerSubSection "CREATING REQUIRED DIRECTORIES" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${JF_USER}" "${JF_USER}" "yes" + io_setOwnership "${NEW_DATA_DIR}" "${JF_USER}" "${JF_USER}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data/postgres" "${POSTGRES_USER}" "${POSTGRES_USER}" "yes" + fi + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + fi +} + +# Check entry in map is format +checkMapEntry () { + local entry="$1" + + [[ "${entry}" != *"="* ]] && echo -n "false" || echo -n "true" +} +# Check value Empty and warn +warnIfEmpty () { + local filePath="$1" + local yamlPath="$2" + local check= + + if [[ -z "${filePath}" ]]; then + warn "Empty value in yamlpath [${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + check=false + else + check=true + fi + echo "${check}" +} + +logCopyStatus () { + local status="$1" + local logMessage="$2" + local warnMessage="$3" + + [[ "${status}" == "success" ]] && logger "${logMessage}" + [[ "${status}" == "fail" ]] && warn "${warnMessage}" +} +# copy contents from source to destination +copyCmd () { + local source="$1" + local target="$2" + local mode="$3" + local status= + + case $mode in + unique) + cp -up "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + specific) + cp -pf "${source}" "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied file [${source}] to [${target}]" "Failed to copy file [${source}] to [${target}]" + ;; + patternFiles) + cp -pf "${source}"* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied files matching 
[${source}*] to [${target}]" "Failed to copy files matching [${source}*] to [${target}]" + ;; + full) + cp -prf "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + esac +} +# Check contents exist in source before copying +copyOnContentExist () { + local source="$1" + local target="$2" + local mode="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + copyCmd "${source}" "${target}" "${mode}" + else + logger "No contents to copy from [${source}]" + fi +} + +# move source to destination +moveCmd () { + local source="$1" + local target="$2" + local status= + + mv -f "${source}" "${target}" && status="success" || status="fail" + [[ "${status}" == "success" ]] && logger "Successfully moved directory [${source}] to [${target}]" + [[ "${status}" == "fail" ]] && warn "Failed to move directory [${source}] to [${target}]" +} + +# symlink target to source +symlinkCmd () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + local check=false + + if [[ "${symlinkSubDir}" == "subDir" ]]; then + ln -sf "${source}"/* "${target}" && check=true || check=false + else + ln -sf "${source}" "${target}" && check=true || check=false + fi + + [[ "${check}" == "true" ]] && logger "Successfully symlinked directory [${target}] to old [${source}]" + [[ "${check}" == "false" ]] && warn "Symlink operation failed" +} +# Check contents exist in source before symlinking +symlinkOnExist () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + if [[ "${symlinkSubDir}" == "subDir" ]]; then + symlinkCmd "${source}" "${target}" "subDir" + else + symlinkCmd "${source}" "${target}" + fi + else + logger "No contents to symlink from [${source}]" + fi +} + +prependDir () { + local absolutePath="$1" + local fullPath="$2" + local sourcePath= + + if [[ "${absolutePath}" = \/* ]]; then + sourcePath="${absolutePath}" + else + sourcePath="${fullPath}" + fi + echo "${sourcePath}" +} + +getFirstEntry (){ + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $1}' +} + +getSecondEntry () { + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $2}' +} +# To get absolutePath +pathResolver () { + local directoryPath="$1" + local dataDir= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Warning" + dataDir="${YAML_VALUE}" + cd "${dataDir}" + else + cd "${OLD_DATA_DIR}" + fi + absoluteDir="`cd "${directoryPath}";pwd`" + echo "${absoluteDir}" +} + +checkPathResolver () { + local value="$1" + + if [[ "${value}" == \/* ]]; then + value="${value}" + else + value="$(pathResolver "${value}")" + fi + echo "${value}" +} + +propertyMigrate () { + local entry="$1" + local filePath="$2" + local fileName="$3" + local check=false + + local yamlPath="$(getFirstEntry "${entry}")" + local property="$(getSecondEntry "${entry}")" + if [[ -z "${property}" ]]; then + warn "Property is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${property}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! 
-z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + value="$(migrateResolveDerbyPath "${key}" "${value}")" + value="$(migrateResolveHaDirPath "${key}" "${value}")" + value="$(updatePostgresUrlString_Hook "${yamlPath}" "${value}")" + fi + if [[ "${key}" == "context.url" ]]; then + local ip=$(echo "${value}" | awk -F/ '{print $3}' | sed 's/:.*//') + setSystemValue "shared.node.ip" "${ip}" "${SYSTEM_YAML_PATH}" + logger "Setting [shared.node.ip] with [${ip}] in system.yaml" + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" && logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" && check=true && break || check=false + fi + done < "${NEW_DATA_DIR}/${filePath}/${fileName}" + [[ "${check}" == "false" ]] && logger "Property [${property}] not found in file [${fileName}]" +} + +setHaEnabled_hook () { + echo "" +} + +migratePropertiesFiles () { + local fileList= + local filePath= + local fileName= + local map= + + retrieveYamlValue "migration.propertyFiles.files" "fileList" "Skip" + fileList="${YAML_VALUE}" + if [[ -z "${fileList}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF PROPERTY FILES" + for file in ${fileList}; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.propertyFiles.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.propertyFiles.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + if [[ "$(checkFileExists "${NEW_DATA_DIR}/${filePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + # setting haEnabled with true only if ha-node.properties is present + setHaEnabled_hook "${filePath}" + retrieveYamlValue "migration.propertyFiles.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + propertyMigrate "${entry}" "${filePath}" "${fileName}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=property" + fi + done + else + logger "File [${fileName}] was not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} + +createTargetDir () { + local mountDir="$1" + local target="$2" + + logger "Target directory not found [${mountDir}/${target}], creating it" + createDirectoryRecursive "${mountDir}" "${target}" "Warning" +} + +createDirectoryRecursive () { + local mountDir="$1" + local target="$2" + local output="$3" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${mountDir}/${target}" + local directory=$(echo "${target}" | tr '/' ' ' ) + local targetDir="${mountDir}" + for dir in ${directory}; + do + targetDir="${targetDir}/${dir}" + mkdir -p "${targetDir}" && check=true || check=false + setOwnershipBasedOnInstaller "${targetDir}" + done + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi +} + +copyOperation () { + local source="$1" + local target="$2" + local mode="$3" + local check=false + local targetDataDir= + local targetLink= + local date= + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir 
"${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + #remove source if it is a symlink + if [[ -L "${source}" ]]; then + targetLink=$(readlink -f "${source}") + logger "Removing the symlink [${source}] pointing to [${targetLink}]" + rm -f "${source}" + source=${targetLink} + fi + if [[ "$(checkDirExists "${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path" + return + fi + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + logger "No contents to copy from [${source}]" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyOnContentExist "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copySpecificFiles () { + local source="$1" + local target="$2" + local mode="$3" + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir "${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkFileExists "${source}")" != "true" ]]; then + logger "Source file [${source}] does not exist in path" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copyPatternMatchingFiles () { + local source="$1" + local target="$2" + local mode="$3" + local sourcePath="${4}" + + # prepend OLD_DATA_DIR only if source is relative path + sourcePath="$(prependDir "${sourcePath}" "${OLD_DATA_DIR}/${sourcePath}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkDirExists "${sourcePath}")" != "true" ]]; then + logger "Source [${sourcePath}] directory not found in path" + return + fi + if ls "${sourcePath}/${source}"* 1> /dev/null 2>&1; then + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${sourcePath}/${source}" "${targetDataDir}/${target}" "${mode}" + else + logger "Source file [${sourcePath}/${source}*] does not exist in path" + fi +} + +copyLogMessage () { + local mode="$1" + case $mode in + specific) + logger "Copy file [${source}] to target [${targetDataDir}/${target}]" + ;; + patternFiles) + logger "Copy files matching [${sourcePath}/${source}*] to target [${targetDataDir}/${target}]" + ;; + full) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + unique) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + esac +} + +copyBannerMessages () { + local mode="$1" + local textMode="$2" + case $mode in + specific) + bannerSection "COPY ${textMode} FILES" + ;; + patternFiles) + bannerSection "COPY MATCHING ${textMode}" + ;; + full) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + unique) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + esac +} + +invokeCopyFunctions () { + local mode="$1" + local source="$2" + local target="$3" + + case $mode in + specific) + copySpecificFiles "${source}" "${target}" "${mode}" + ;; + 
patternFiles) + retrieveYamlValue "migration.${copyFormat}.sourcePath" "map" "Warning" + local sourcePath="${YAML_VALUE}" + copyPatternMatchingFiles "${source}" "${target}" "${mode}" "${sourcePath}" + ;; + full) + copyOperation "${source}" "${target}" "${mode}" + ;; + unique) + copyOperation "${source}" "${target}" "${mode}" + ;; + esac +} +# Copies contents from source directory and target directory +copyDataDirectories () { + local copyFormat="$1" + local mode="$2" + local map= + local source= + local target= + local textMode= + local targetDataDir= + local copyFormatValue= + + retrieveYamlValue "migration.${copyFormat}" "${copyFormat}" "Skip" + copyFormatValue="${YAML_VALUE}" + if [[ -z "${copyFormatValue}" ]]; then + return + fi + textMode=$(echo "${mode}" | tr '[:lower:]' '[:upper:]' 2>/dev/null) + copyBannerMessages "${mode}" "${textMode}" + retrieveYamlValue "migration.${copyFormat}.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeCopyFunctions "${mode}" "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +invokeMoveFunctions () { + local source="$1" + local target="$2" + local sourceDataDir= + local targetBasename= + # prepend OLD_DATA_DIR only if source is relative path + sourceDataDir=$(prependDir "${source}" "${OLD_DATA_DIR}/${source}") + targetBasename=$(dirname "${target}") + logger "Moving directory source [${sourceDataDir}] to target [${NEW_DATA_DIR}/${target}]" + if [[ "$(checkDirExists "${sourceDataDir}")" != "true" ]]; then + logger "Directory [${sourceDataDir}] not found in path to move" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${targetBasename}")" != "true" ]]; then + createTargetDir "${NEW_DATA_DIR}" "${targetBasename}" + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/${target}" + else + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/tempDir" + moveCmd "${NEW_DATA_DIR}/tempDir" "${NEW_DATA_DIR}/${target}" + fi +} + +# Move source directory and target directory +moveDirectories () { + local moveDataDirectories= + local map= + local source= + local target= + + retrieveYamlValue "migration.moveDirectories" "moveDirectories" "Skip" + moveDirectories="${YAML_VALUE}" + if [[ -z "${moveDirectories}" ]]; then + return + fi + bannerSection "MOVE DIRECTORIES" + retrieveYamlValue "migration.moveDirectories.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeMoveFunctions "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e 
target=source" + fi + echo ""; + done +} + +# Trim masterKey if its generated using hex 32 +trimMasterKey () { + local masterKeyDir=/opt/jfrog/artifactory/var/etc/security + local oldMasterKey=$(<${masterKeyDir}/master.key) + local oldMasterKey_Length=$(echo ${#oldMasterKey}) + local newMasterKey= + if [[ ${oldMasterKey_Length} -gt 32 ]]; then + bannerSection "TRIM MASTERKEY" + newMasterKey=$(echo ${oldMasterKey:0:32}) + cp ${masterKeyDir}/master.key ${masterKeyDir}/backup_master.key + logger "Original masterKey is backed up : ${masterKeyDir}/backup_master.key" + rm -rf ${masterKeyDir}/master.key + echo ${newMasterKey} > ${masterKeyDir}/master.key + logger "masterKey is trimmed : ${masterKeyDir}/master.key" + fi +} + +copyDirectories () { + + copyDataDirectories "copyFiles" "full" + copyDataDirectories "copyUniqueFiles" "unique" + copyDataDirectories "copySpecificFiles" "specific" + copyDataDirectories "copyPatternMatchingFiles" "patternFiles" +} + +symlinkDir () { + local source="$1" + local target="$2" + local targetDir= + local basename= + local targetParentDir= + + targetDir="$(dirname "${target}")" + if [[ "${targetDir}" == "${source}" ]]; then + # symlink the sub directories + createDirectory "${NEW_DATA_DIR}/${target}" "Warning" + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" "subDir" + basename="$(basename "${target}")" + cd "${NEW_DATA_DIR}/${target}" && rm -f "${basename}" + fi + else + targetParentDir="$(dirname "${NEW_DATA_DIR}/${target}")" + createDirectory "${targetParentDir}" "Warning" + if [[ "$(checkDirExists "${targetParentDir}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" + fi + fi +} + +symlinkOperation () { + local source="$1" + local target="$2" + local check=false + local targetLink= + local date= + + # Check if source is a link and do symlink + if [[ -L "${OLD_DATA_DIR}/${source}" ]]; then + targetLink=$(readlink -f "${OLD_DATA_DIR}/${source}") + symlinkOnExist "${targetLink}" "${NEW_DATA_DIR}/${target}" + else + # check if source is directory and do symlink + if [[ "$(checkDirExists "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path to symlink" + return + fi + if [[ "$(checkDirContents "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "No contents found in [${OLD_DATA_DIR}/${source}] to symlink" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" != "true" ]]; then + logger "Target directory [${NEW_DATA_DIR}/${target}] does not exist to create symlink, creating it" + symlinkDir "${source}" "${target}" + else + rm -rf "${NEW_DATA_DIR}/${target}" && check=true || check=false + [[ "${check}" == "false" ]] && warn "Failed to remove contents in [${NEW_DATA_DIR}/${target}/]" + symlinkDir "${source}" "${target}" + fi + fi +} +# Creates a symlink path - Source directory to which the symbolic link should point. 
+symlinkDirectories () { + local linkFiles= + local map= + local source= + local target= + + retrieveYamlValue "migration.linkFiles" "linkFiles" "Skip" + linkFiles="${YAML_VALUE}" + if [[ -z "${linkFiles}" ]]; then + return + fi + bannerSection "SYMLINK DIRECTORIES" + retrieveYamlValue "migration.linkFiles.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + logger "Symlink directory [${NEW_DATA_DIR}/${target}] to old [${OLD_DATA_DIR}/${source}]" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + symlinkOperation "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +updateConnectionString () { + local yamlPath="$1" + local value="$2" + local mongoPath="shared.mongo.url" + local rabbitmqPath="shared.rabbitMq.url" + local postgresPath="shared.database.url" + local redisPath="shared.redis.connectionString" + local mongoConnectionString="mongo.connectionString" + local sourceKey= + local hostIp=$(io_getPublicHostIP) + local hostKey= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + # Replace @postgres:,@mongodb:,@rabbitmq:,@redis: to @{hostIp}: (Compose Installer) + hostKey="@${hostIp}:" + case $yamlPath in + ${postgresPath}) + sourceKey="@postgres:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoPath}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${rabbitmqPath}) + sourceKey="@rabbitmq:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${redisPath}) + sourceKey="@redis:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoConnectionString}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + esac + fi + echo -n "${value}" +} + +yamlMigrate () { + local entry="$1" + local sourceFile="$2" + local value= + local yamlPath= + local key= + yamlPath="$(getFirstEntry "${entry}")" + key="$(getSecondEntry "${entry}")" + if [[ -z "${key}" ]]; then + warn "key is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + getYamlValue "${key}" "${sourceFile}" "false" + value="${YAML_VALUE}" + if [[ ! 
-z "${value}" ]]; then + value=$(updateConnectionString "${yamlPath}" "${value}") + fi + if [[ "${PRODUCT}" == "artifactory" ]]; then + replicatorProfiling + fi + if [[ -z "${value}" ]]; then + logger "No value for [${key}] in [${sourceFile}]" + else + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the key [${key}] in system.yaml" + fi +} + +migrateYamlFile () { + local files= + local filePath= + local fileName= + local sourceFile= + local map= + retrieveYamlValue "migration.yaml.files" "files" "Skip" + files="${YAML_VALUE}" + if [[ -z "${files}" ]]; then + return + fi + bannerSection "MIGRATION OF YAML FILES" + for file in $files; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.yaml.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.yaml.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + sourceFile="${NEW_DATA_DIR}/${filePath}/${fileName}" + if [[ "$(checkFileExists "${sourceFile}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + retrieveYamlValue "migration.yaml.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + yamlMigrate "${entry}" "${sourceFile}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done + else + logger "File [${fileName}] is not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} +# updates the key and value in system.yaml +updateYamlKeyValue () { + local entry="$1" + local value= + local yamlPath= + local key= + + yamlPath="$(getFirstEntry "${entry}")" + value="$(getSecondEntry "${entry}")" + if [[ -z "${value}" ]]; then + warn "value is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value [${value}] in system.yaml" +} + +updateSystemYamlFile () { + local updateYaml= + local map= + + retrieveYamlValue "migration.updateSystemYaml" "updateYaml" "Skip" + updateSystemYaml="${YAML_VALUE}" + if [[ -z "${updateSystemYaml}" ]]; then + return + fi + bannerSection "UPDATE SYSTEM YAML FILE WITH KEY AND VALUES" + retrieveYamlValue "migration.updateSystemYaml.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ -z "${map}" ]]; then + return + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + updateYamlKeyValue "${entry}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done +} + +backupFiles_hook () { + logSilly "Method ${FUNCNAME[0]}" +} + +backupDirectory () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + 
effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyOnContentExist "${targetDir}" "${backupDirectory}/${dir}" "full" + fi +} + +removeOldDirectory () { + local backupDir="$1" + local entry="$2" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${entry}" "${OLD_DATA_DIR}/${entry}")" + local outputCheckDirExists="$(checkDirExists "${targetDir}")" + if [[ "${outputCheckDirExists}" != "true" ]]; then + logger "No [${targetDir}] directory found to delete" + echo ""; + return + fi + backupDirectory "${backupDir}" "${entry}" "${targetDir}" + rm -rf "${targetDir}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed directory [${targetDir}]" + [[ "${check}" == "false" ]] && warn "Failed to remove directory [${targetDir}]" + echo ""; +} + +cleanUpOldDataDirectories () { + local cleanUpOldDataDir= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldDataDir" "cleanUpOldDataDir" "Skip" + cleanUpOldDataDir="${YAML_VALUE}" + if [[ -z "${cleanUpOldDataDir}" ]]; then + return + fi + bannerSection "CLEAN UP OLD DATA DIRECTORIES" + retrieveYamlValue "migration.cleanUpOldDataDir.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old data configurations are backedup in [${backupDir}] directory ******" + backupFiles_hook "${backupDir}/${PRODUCT}" + for entry in $map; + do + removeOldDirectory "${backupDir}" "${entry}" + done +} + +backupFiles () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local fileName="$4" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyCmd "${targetDir}/${fileName}" "${backupDirectory}/${dir}" "specific" + fi +} + +removeOldFiles () { + local backupDir="$1" + local directoryName="$2" + local fileName="$3" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${directoryName}" "${OLD_DATA_DIR}/${directoryName}")" + local outputCheckFileExists="$(checkFileExists "${targetDir}/${fileName}")" + if [[ "${outputCheckFileExists}" != "true" ]]; then + logger "No [${targetDir}/${fileName}] 
file found to delete" + return + fi + backupFiles "${backupDir}" "${directoryName}" "${targetDir}" "${fileName}" + rm -f "${targetDir}/${fileName}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed file [${targetDir}/${fileName}]" + [[ "${check}" == "false" ]] && warn "Failed to remove file [${targetDir}/${fileName}]" + echo ""; +} + +cleanUpOldFiles () { + local cleanUpOldFiles= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldFiles" "cleanUpOldFiles" "Skip" + cleanUpOldFiles="${YAML_VALUE}" + if [[ -z "${cleanUpOldFiles}" ]]; then + return + fi + bannerSection "CLEAN UP OLD FILES" + retrieveYamlValue "migration.cleanUpOldFiles.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && return + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old files are backed up in [${backupDir}] directory ******" + for entry in $map; + do + local outputCheckMapEntry="$(checkMapEntry "${entry}")" + if [[ "${outputCheckMapEntry}" != "true" ]]; then + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in the correct format; the correct format is directoryName=fileName" + fi + local fileName="$(getSecondEntry "${entry}")" + local directoryName="$(getFirstEntry "${entry}")" + [[ -z "${fileName}" ]] && warn "File name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${directoryName}" ]] && warn "Directory name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + removeOldFiles "${backupDir}" "${directoryName}" "${fileName}" + echo ""; + done +} + +startMigration () { + bannerSection "STARTING MIGRATION" +} + +endMigration () { + bannerSection "MIGRATION COMPLETED SUCCESSFULLY" +} + +initialize () { + setAppDir + _pauseExecution "setAppDir" + initHelpers + _pauseExecution "initHelpers" + checkMigrationInfoYaml + _pauseExecution "checkMigrationInfoYaml" + getProduct + _pauseExecution "getProduct" + getDataDir + _pauseExecution "getDataDir" +} + +main () { + case $PRODUCT in + artifactory) + migrateArtifactory + ;; + distribution) + migrateDistribution + ;; + xray) + migrationXray + ;; + esac + exit 0 +} + +# Ensures metadata is logged +LOG_BEHAVIOR_ADD_META="$FLAG_Y" + + +migrateResolveDerbyPath () { + local key="$1" + local value="$2" + + if [[ "${key}" == "url" && "${value}" == *"db.home"* ]]; then + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + derbyPath="/opt/jfrog/artifactory/var/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + else + derbyPath="${NEW_DATA_DIR}/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + fi + fi + echo "${value}" +} + +migrateResolveHaDirPath () { + local key="$1" + local value="$2" + + if [[ "${INSTALLER}" == "${RPM_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" || "${INSTALLER}" == "${DEB_TYPE}" ]]; then + if [[ "${key}" == "artifactory.ha.data.dir" || "${key}" == "artifactory.ha.backup.dir" ]]; then + value=$(checkPathResolver "${value}") + fi + fi + echo "${value}" +} +updatePostgresUrlString_Hook () { + local yamlPath="$1" + local value="$2" + local hostIp=$(io_getPublicHostIP) + local sourceKey="//postgresql:" + if [[ "${yamlPath}" == "shared.database.url" ]]; then + value=$(io_replaceString "${value}" "${sourceKey}" "//${hostIp}:" "#") + fi + echo "${value}" +} +# Check Artifactory product version +checkArtifactoryVersion () { + local minProductVersion="6.0.0" + 
local maxProductVersion="7.0.0" + local propertyInDocker="ARTIFACTORY_VERSION" + local property="artifactory.version" + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + local newfilePath="${APP_DIR}/../.env" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + else + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="/etc/opt/jfrog/artifactory/artifactory.properties" + fi + + getProductVersion "${minProductVersion}" "${maxProductVersion}" "${newfilePath}" "${oldfilePath}" "${propertyInDocker}" "${property}" +} + +getCustomDataDir_hook () { + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Fail" + OLD_DATA_DIR="${YAML_VALUE}" +} + +# Get protocol value of connector +getXmlConnectorProtocol () { + local i="$1" + local filePath="$2" + local fileName="$3" + local protocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@protocol' ${filePath}/${fileName} 2>/dev/null |awk -F"=" '{print $2}' | tr -d '"') + echo -e "${protocolValue}" +} + +# Get all attributes of connector +getXmlConnectorAttributes () { + local i="$1" + local filePath="$2" + local fileName="$3" + local connectorAttributes=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@*' ${filePath}/${fileName} 2>/dev/null) + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + echo "${connectorAttributes}" +} + +# Get port value of connector +getXmlConnectorPort () { + local i="$1" + local filePath="$2" + local fileName="$3" + local portValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@port' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${portValue}" +} + +# Get maxThreads value of connector +getXmlConnectorMaxThreads () { + local i="$1" + local filePath="$2" + local fileName="$3" + local maxThreadValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@maxThreads' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${maxThreadValue}" +} +# Get sendReasonPhrase value of connector +getXmlConnectorSendReasonPhrase () { + local i="$1" + local filePath="$2" + local fileName="$3" + local sendReasonPhraseValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sendReasonPhrase' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${sendReasonPhraseValue}" +} +# Get relaxedPathChars value of connector +getXmlConnectorRelaxedPathChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedPathCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedPathChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + relaxedPathCharsValue=$(io_trim "${relaxedPathCharsValue}") + echo -e "${relaxedPathCharsValue}" +} +# Get relaxedQueryChars value of connector +getXmlConnectorRelaxedQueryChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedQueryCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedQueryChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + 
relaxedQueryCharsValue=$(io_trim "${relaxedQueryCharsValue}") + echo -e "${relaxedQueryCharsValue}" +} + +# Updating system.yaml with Connector port +setConnectorPort () { + local yamlPath="$1" + local valuePort="$2" + local portYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${valuePort}" ]]; then + warn "port value is empty, could not migrate to system.yaml" + return + fi + ## Getting port yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" portYamlPath "Warning" + portYamlPath="${YAML_VALUE}" + if [[ -z "${portYamlPath}" ]]; then + return + fi + setSystemValue "${portYamlPath}" "${valuePort}" "${SYSTEM_YAML_PATH}" + logger "Setting [${portYamlPath}] with value [${valuePort}] in system.yaml" +} + +# Updating system.yaml with Connector maxThreads +setConnectorMaxThread () { + local yamlPath="$1" + local threadValue="$2" + local maxThreadYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${threadValue}" ]]; then + return + fi + ## Getting max Threads yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" maxThreadYamlPath "Warning" + maxThreadYamlPath="${YAML_VALUE}" + if [[ -z "${maxThreadYamlPath}" ]]; then + return + fi + setSystemValue "${maxThreadYamlPath}" "${threadValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${maxThreadYamlPath}] with value [${threadValue}] in system.yaml" +} + +# Updating system.yaml with Connector sendReasonPhrase +setConnectorSendReasonPhrase () { + local yamlPath="$1" + local sendReasonPhraseValue="$2" + local sendReasonPhraseYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${sendReasonPhraseValue}" ]]; then + return + fi + ## Getting sendReasonPhrase yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" sendReasonPhraseYamlPath "Warning" + sendReasonPhraseYamlPath="${YAML_VALUE}" + if [[ -z "${sendReasonPhraseYamlPath}" ]]; then + return + fi + setSystemValue "${sendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${sendReasonPhraseYamlPath}] with value [${sendReasonPhraseValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedPathChars +setConnectorRelaxedPathChars () { + local yamlPath="$1" + local relaxedPathCharsValue="$2" + local relaxedPathCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedPathCharsValue}" ]]; then + return + fi + ## Getting relaxedPathChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedPathCharsYamlPath "Warning" + relaxedPathCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedPathCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedPathCharsYamlPath}" "${relaxedPathCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedPathCharsYamlPath}] with value [${relaxedPathCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedQueryChars +setConnectorRelaxedQueryChars () { + local yamlPath="$1" + local relaxedQueryCharsValue="$2" + local relaxedQueryCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedQueryCharsValue}" ]]; then + return + fi + ## Getting relaxedQueryChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedQueryCharsYamlPath "Warning" + relaxedQueryCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedQueryCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedQueryCharsYamlPath}" "${relaxedQueryCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedQueryCharsYamlPath}] with value 
[${relaxedQueryCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connectors configurations +setConnectorExtraConfig () { + local yamlPath="$1" + local connectorAttributes="$2" + local extraConfigPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${connectorAttributes}" ]]; then + return + fi + ## Getting extraConfig yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConfig "Warning" + extraConfigPath="${YAML_VALUE}" + if [[ -z "${extraConfigPath}" ]]; then + return + fi + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setSystemValue "${extraConfigPath}" "${connectorAttributes}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConfigPath}] with connector attributes in system.yaml" +} + +# Updating system.yaml with extra Connectors +setExtraConnector () { + local yamlPath="$1" + local extraConnector="$2" + local extraConnectorYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${extraConnector}" ]]; then + return + fi + ## Getting extraConnecotr yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConnectorYamlPath "Warning" + extraConnectorYamlPath="${YAML_VALUE}" + if [[ -z "${extraConnectorYamlPath}" ]]; then + return + fi + getYamlValue "${extraConnectorYamlPath}" "${SYSTEM_YAML_PATH}" "false" + local connectorExtra="${YAML_VALUE}" + if [[ -z "${connectorExtra}" ]]; then + setSystemValue "${extraConnectorYamlPath}" "${extraConnector}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + else + setSystemValue "${extraConnectorYamlPath}" "\"${connectorExtra} ${extraConnector}\"" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + fi +} + +# Migrate extra connectors to system.yaml +migrateExtraConnectors () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local excludeDefaultPort="$4" + local i="$5" + local extraConfig= + local extraConnector= + if [[ "${excludeDefaultPort}" == "yes" ]]; then + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" && "${portValue}" != "${DEFAULT_RT_PORT}" ]] || continue + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + done + else + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + fi +} + +# Migrate connector configurations +migrateConnectorConfig () { + local i="$1" + local protocolType="$2" + local portValue="$3" + local connectorPortYamlPath="$4" + local connectorMaxThreadYamlPath="$5" + local connectorAttributesYamlPath="$6" + local filePath="$7" + local fileName="$8" + local connectorSendReasonPhraseYamlPath="$9" + local connectorRelaxedPathCharsYamlPath="${10}" + local connectorRelaxedQueryCharsYamlPath="${11}" + + # migrate port + setConnectorPort "${connectorPortYamlPath}" "${portValue}" + + # migrate maxThreads + local maxThreadValue=$(getXmlConnectorMaxThreads "$i" "${filePath}" "${fileName}") + setConnectorMaxThread "${connectorMaxThreadYamlPath}" "${maxThreadValue}" + + # migrate sendReasonPhrase + local sendReasonPhraseValue=$(getXmlConnectorSendReasonPhrase "$i" "${filePath}" "${fileName}") + 
setConnectorSendReasonPhrase "${connectorSendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" + + # migrate relaxedPathChars + local relaxedPathCharsValue=$(getXmlConnectorRelaxedPathChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedPathChars "${connectorRelaxedPathCharsYamlPath}" "\"${relaxedPathCharsValue}\"" + # migrate relaxedQueryChars + local relaxedQueryCharsValue=$(getXmlConnectorRelaxedQueryChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedQueryChars "${connectorRelaxedQueryCharsYamlPath}" "\"${relaxedQueryCharsValue}\"" + + # migrate all attributes to extra config except port , maxThread , sendReasonPhrase ,relaxedPathChars and relaxedQueryChars + local connectorAttributes=$(getXmlConnectorAttributes "$i" "${filePath}" "${fileName}") + connectorAttributes=$(echo "${connectorAttributes}" | sed 's/port="'${portValue}'"//g' | sed 's/maxThreads="'${maxThreadValue}'"//g' | sed 's/sendReasonPhrase="'${sendReasonPhraseValue}'"//g' | sed 's/relaxedPathChars="\'${relaxedPathCharsValue}'\"//g' | sed 's/relaxedQueryChars="\'${relaxedQueryCharsValue}'\"//g') + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setConnectorExtraConfig "${connectorAttributesYamlPath}" "${connectorAttributes}" +} + +# Check for default port 8040 and 8081 in connectors and migrate +migrateConnectorPort () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + local connectorPortYamlPath="$5" + local connectorMaxThreadYamlPath="$6" + local connectorAttributesYamlPath="$7" + local connectorSendReasonPhraseYamlPath="$8" + local connectorRelaxedPathCharsYamlPath="$9" + local connectorRelaxedQueryCharsYamlPath="${10}" + local portYamlPath= + local maxThreadYamlPath= + local status= + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" == *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + RT_DEFAULTPORT_STATUS=success + else + AC_DEFAULTPORT_STATUS=success + fi + migrateConnectorConfig "${i}" "${protocolType}" "${portValue}" "${connectorPortYamlPath}" "${connectorMaxThreadYamlPath}" "${connectorAttributesYamlPath}" "${filePath}" "${fileName}" "${connectorSendReasonPhraseYamlPath}" "${connectorRelaxedPathCharsYamlPath}" "${connectorRelaxedQueryCharsYamlPath}" + done +} + +# migrate to extra, connector having default port and protocol is AJP +migrateDefaultPortIfAjp () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + done + +} + +# Comparing max threads in connectors +compareMaxThreads () { + local firstConnectorMaxThread="$1" + local firstConnectorNode="$2" + local secondConnectorMaxThread="$3" + local secondConnectorNode="$4" + local filePath="$5" + local fileName="$6" + + # choose higher maxThreads connector as Artifactory. 
+ if [[ "${firstConnectorMaxThread}" -gt ${secondConnectorMaxThread} || "${firstConnectorMaxThread}" -eq ${secondConnectorMaxThread} ]]; then + # maxThread is higher in firstConnector, + # Taking firstConnector as Artifactory and SecondConnector as Access + # maxThread is equal in both connector,considering firstConnector as Artifactory and SecondConnector as Access + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + else + # maxThread is higher in SecondConnector, + # Taking SecondConnector as Artifactory and firstConnector as Access + local rtPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +# Check max threads exist to compare +maxThreadsExistToCompare () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local firstConnectorMaxThread= + local secondConnectorMaxThread= + local firstConnectorNode= + local secondConnectorNode= + local status=success + local firstnode=fail + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ ${protocolType} == *AJP* ]]; then + # Migrate Connectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + fi + # store maxthreads value of each connector + if [[ ${firstnode} == "fail" ]]; then + firstConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + firstConnectorNode="${i}" + firstnode=success + else + secondConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + secondConnectorNode="${i}" + fi + done + [[ -z "${firstConnectorMaxThread}" ]] && status=fail + [[ -z "${secondConnectorMaxThread}" ]] && status=fail + # maxThreads is set, now compare MaxThreads + if [[ "${status}" == "success" ]]; then + compareMaxThreads "${firstConnectorMaxThread}" "${firstConnectorNode}" "${secondConnectorMaxThread}" "${secondConnectorNode}" "${filePath}" "${fileName}" + else + # Assume first connector is RT, maxThreads is not set in both connectors + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" 
"${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +migrateExtraBasedOnNonAjpCount () { + local nonAjpCount="$1" + local filePath="$2" + local fileName="$3" + local connectorCount="$4" + local i="$5" + + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ "${protocolType}" == *AJP* ]]; then + if [[ "${nonAjpCount}" -eq 1 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + else + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + continue + fi + fi +} + +# find RT and AC Connector +findRtAndAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local initialAjpCount=0 + local nonAjpCount=0 + + # get the count of non AJP + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] || continue + nonAjpCount=$((initialAjpCount+1)) + initialAjpCount="${nonAjpCount}" + done + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access and artifactory connectors + # Mark port as 8040 for access + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + done + elif [[ "${nonAjpCount}" -eq 2 ]]; then + # compare maxThreads in both connectors + maxThreadsExistToCompare "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${nonAjpCount}" -gt 2 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # setting with default port in system.yaml + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# get the count of non AJP +getCountOfNonAjp () { + local port="$1" + local connectorCount="$2" + local filePath=$3 + local fileName=$4 + local initialNonAjpCount=0 + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" 
"${filePath}" "${fileName}") + [[ "${portValue}" != "${port}" ]] || continue + [[ "${protocolType}" != *AJP* ]] || continue + local nonAjpCount=$((initialNonAjpCount+1)) + initialNonAjpCount="${nonAjpCount}" + done + echo -e "${nonAjpCount}" +} + +# Find for access connector +findAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_RT_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access connector and mark port as that of connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take RT properties into access with 8040 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add RT connector details as access connector and mark port as 8040 + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# Find for artifactory connector +findRtConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_ACCESS_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as RT connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take access properties into artifactory with 8081 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + 
if [[ "${portValue}" == "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add access connector details as RT connector and mark as ${DEFAULT_RT_PORT} + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +checkForTlsConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + local sslProtocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sslProtocol' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${sslProtocolValue}" == "TLS" ]]; then + bannerImportant "NOTE: Ignoring TLS connector during migration, modify the system yaml to enable TLS. Original server.xml is saved in path [${filePath}/${fileName}]" + TLS_CONNECTOR_EXISTS=${FLAG_Y} + continue + fi + done +} + +# set custom tomcat server Listeners to system.yaml +setListenerConnector () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + for ((i = 1 ; i <= "${listenerCount}" ; i++)) + do + local listenerConnector=$($LIBXML2_PATH --xpath '//Server/Listener['$i']' ${filePath}/${fileName} 2>/dev/null) + local listenerClassName=$($LIBXML2_PATH --xpath '//Server/Listener['$i']/@className' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${listenerClassName}" == *Apr* ]]; then + setExtraConnector "${EXTRA_LISTENER_CONFIG_YAMLPATH}" "${listenerConnector}" + fi + done +} +# add custom tomcat server Listeners +addTomcatServerListeners () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + if [[ "${listenerCount}" == "0" ]]; then + logger "No listener connectors found in the [${filePath}/${fileName}],skipping migration of listener connectors" + else + setListenerConnector "${filePath}" "${fileName}" "${listenerCount}" + setSystemValue "${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}" "true" "${SYSTEM_YAML_PATH}" + logger "Setting [${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}] with value [true] in system.yaml" + fi +} + +# server.xml migration operations +xmlMigrateOperation () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local listenerCount="$4" + RT_DEFAULTPORT_STATUS=fail + AC_DEFAULTPORT_STATUS=fail + TLS_CONNECTOR_EXISTS=${FLAG_N} + + # Check for connector with TLS , if found ignore migrating it + checkForTlsConnector "${filePath}" "${fileName}" "${connectorCount}" + if [[ "${TLS_CONNECTOR_EXISTS}" == "${FLAG_Y}" ]]; then + return + fi + addTomcatServerListeners "${filePath}" "${fileName}" "${listenerCount}" + # Migrate RT default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" 
"${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + # Migrate to extra if RT default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" + # Migrate AC default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + # Migrate to extra if access default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" + + if [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # RT and AC default port found + logger "Artifactory 8081 and Access 8040 default port are found" + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # Only AC default port found,find RT connector + logger "Found Access default 8040 port" + findRtConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # Only RT default port found,find AC connector + logger "Found Artifactory default 8081 port" + findAcConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # RT and AC default port not found, find connector + logger "Artifactory 8081 and Access 8040 default port are not found" + findRtAndAcConnector "${filePath}" "${fileName}" "${connectorCount}" + fi +} + +# get count of connectors +getXmlConnectorCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Service/Connector)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# get count of listener connectors +getTomcatServerListenersCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Listener)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# Migrate server.xml configuration to system.yaml +migrateXmlFile () { + local xmlFiles= + local fileName= + local filePath= + local sourceFilePath= + DEFAULT_ACCESS_PORT="8040" + DEFAULT_RT_PORT="8081" + AC_PORT_YAMLPATH="migration.xmlFiles.serverXml.access.port" + AC_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.access.maxThreads" + AC_SENDREASONPHRASE_YAMLPATH="migration.xmlFiles.serverXml.access.sendReasonPhrase" + AC_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.access.extraConfig" + RT_PORT_YAMLPATH="migration.xmlFiles.serverXml.artifactory.port" + RT_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.artifactory.maxThreads" + RT_SENDREASONPHRASE_YAMLPATH='migration.xmlFiles.serverXml.artifactory.sendReasonPhrase' + RT_RELAXEDPATHCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedPathChars' + RT_RELAXEDQUERYCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedQueryChars' + RT_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.artifactory.extraConfig" + ROUTER_PORT_YAMLPATH="migration.xmlFiles.serverXml.router.port" + EXTRA_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.config" + EXTRA_LISTENER_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.listener" + RT_TOMCAT_HTTPSCONNECTOR_ENABLED="artifactory.tomcat.httpsConnector.enabled" + + retrieveYamlValue "migration.xmlFiles" "xmlFiles" "Skip" + xmlFiles="${YAML_VALUE}" 
+ if [[ -z "${xmlFiles}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF XML FILES" + retrieveYamlValue "migration.xmlFiles.serverXml.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + if [[ -z "${fileName}" ]]; then + return + fi + bannerSubSection "Processing Migration of $fileName" + retrieveYamlValue "migration.xmlFiles.serverXml.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + if [[ -z "${filePath}" ]]; then + return + fi + # prepend NEW_DATA_DIR only if filePath is relative path + sourceFilePath=$(prependDir "${filePath}" "${NEW_DATA_DIR}/${filePath}") + if [[ "$(checkFileExists "${sourceFilePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] is found in path [${sourceFilePath}]" + local connectorCount=$(getXmlConnectorCount "${sourceFilePath}" "${fileName}") + if [[ "${connectorCount}" == "0" ]]; then + logger "No connectors found in the [${filePath}/${fileName}],skipping migration of xml configuration" + return + fi + local listenerCount=$(getTomcatServerListenersCount "${sourceFilePath}" "${fileName}") + xmlMigrateOperation "${sourceFilePath}" "${fileName}" "${connectorCount}" "${listenerCount}" + else + logger "File [${fileName}] is not found in path [${sourceFilePath}] to migrate" + fi +} + +compareArtifactoryUser () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + + if [[ "${oldPropertyValue}" != "${newPropertyValue}" ]]; then + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" + else + logger "No change in property [${property}] value in [${sourceFile}] to migrate" + fi +} + +migrateReplicator () { + local property="$1" + local oldPropertyValue="$2" + local yamlPath="$3" + + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" +} + +compareJavaOptions () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + local oldJavaOption= + local newJavaOption= + local extraJavaOption= + local check=false + local success=true + local status=true + + oldJavaOption=$(echo "${oldPropertyValue}" | awk 'BEGIN{FS=OFS="\""}{for(i=2;i> /etc/hosts" +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${MEMBER_SERVICE_IP} {{ template "artifactory-ha.fullname" . }}\" >> /etc/hosts" +{{- end }} + +3. Launch jconsole: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.primary.javaOpts.jmx.port }} +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.fullname" . }}:{{ .Values.artifactory.node.javaOpts.jmx.port }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/_helpers.tpl new file mode 100644 index 000000000..8f9dc060c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/_helpers.tpl @@ -0,0 +1,127 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "artifactory-ha.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The primary node name +*/}} +{{- define "artifactory-ha.primary.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-primary" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-primary" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +The member node name +*/}} +{{- define "artifactory-ha.node.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-member" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-member" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name nginx service. +*/}} +{{- define "artifactory-ha.nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified Replicator app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.replicator.fullname" -}} +{{- if .Values.artifactory.replicator.ingress.name -}} +{{- .Values.artifactory.replicator.ingress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.nginx.fullname" -}} +{{- if .Values.nginx.fullnameOverride -}} +{{- .Values.nginx.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nginx.name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory-ha.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory-ha.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "artifactory-ha.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory-ha.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory-ha.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory-ha.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory-ha.name" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Scheme (http/https) based on Access TLS enabled/disabled +*/}} +{{- define "artifactory-ha.scheme" -}} +{{- if .Values.access.accessConfig.security.tls -}} +{{- printf "%s" "https" -}} +{{- else -}} +{{- printf "%s" "http" -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/admin-bootstrap-creds.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/admin-bootstrap-creds.yaml new file mode 100644 index 000000000..b344e86b6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/admin-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} +{{- if .Values.artifactory.admin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "%s@%s=%s" .Values.artifactory.admin.username .Values.artifactory.admin.ip .Values.artifactory.admin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-access-config.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-access-config.yaml new file mode 100644 index 000000000..6552d45dc --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-access-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.access.accessConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-access-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + access.config.import.yml: | +{{ tpl (toYaml .Values.access.accessConfig) . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-binarystore-secret.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-binarystore-secret.yaml new file mode 100644 index 000000000..2e7cac758 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-binarystore + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . 
| indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-configmaps.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-configmaps.yaml new file mode 100644 index 000000000..1385bc578 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + labels: + app: {{ template "artifactory-ha.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . | indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-custom-secrets.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-custom-secrets.yaml new file mode 100644 index 000000000..c22188a64 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.artifactory.customSecrets }} +{{- range .Values.artifactory.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + component: "{{ $.Values.artifactory.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-database-secrets.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-database-secrets.yaml new file mode 100644 index 000000000..9ff71855f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-database-secrets.yaml @@ -0,0 +1,22 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-database-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-gcp-credentials-secret.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-gcp-credentials-secret.yaml new file mode 100644 index 000000000..c6a2682c8 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-gcp-credentials-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} +{{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-gcpcreds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + gcp.credentials.json: |- +{{ tpl .Values.artifactory.persistence.googleStorage.gcpServiceAccount.config . | indent 4 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-installer-info.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-installer-info.yaml new file mode 100644 index 000000000..e58ec41b3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-installer-info.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + {{ tpl .Values.installerInfo . }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-license-secret.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-license-secret.yaml new file mode 100644 index 000000000..3f629c6e4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-license + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-migration-scripts.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-migration-scripts.yaml new file mode 100644 index 000000000..fe40f980f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-migration-scripts.yaml @@ -0,0 +1,18 @@ +{{- if .Values.artifactory.migration.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + migrate.sh: | +{{ .Files.Get "files/migrate.sh" | indent 4 }} + migrationHelmInfo.yaml: | +{{ .Files.Get "files/migrationHelmInfo.yaml" | indent 4 }} + migrationStatus.sh: | +{{ .Files.Get "files/migrationStatus.sh" | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-networkpolicy.yaml new file mode 100644 index 000000000..371dc9a5f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-nfs-pvc.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-nfs-pvc.yaml new file mode 100644 index 000000000..6ed7d82f6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-nfs-pvc.yaml @@ -0,0 +1,101 @@ +{{- if eq .Values.artifactory.persistence.type "nfs" }} +### Artifactory HA data +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-data-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haDataMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-data-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +--- +### Artifactory HA backup +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . 
}}-backup-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-backup-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haBackupMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-backup-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-pdb.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-pdb.yaml new file mode 100644 index 000000000..d588e0a85 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-pdb.yaml @@ -0,0 +1,20 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-node + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.artifactory.name }} + app: {{ template "artifactory-ha.name" . }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.node.minAvailable }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-statefulset.yaml new file mode 100644 index 000000000..af5ce966d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-node-statefulset.yaml @@ -0,0 +1,711 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.node.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.node.name" . }} + replicas: {{ .Values.artifactory.node.replicaCount }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . 
}} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.node.name" . }} + heritage: {{ .Release.Service }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled }} + - name: "wait-for-primary" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + echo "Waiting for primary node to be ready..."; + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled .Values.artifactory.node.waitForPrimaryStartup.time }} + echo "Sleeping to allow time for primary node to come up"; + sleep {{ .Values.artifactory.node.waitForPrimaryStartup.time }}; + {{- else }} + while [ "$(wget --spider --no-check-certificate -S -T 3 {{ include "artifactory-ha.scheme" . }}://{{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.externalPort }}/ 2>&1 | grep '^ HTTP/' | awk '{print $2}')" != "200" ]; + do echo "Primary not ready. Waiting..."; sleep 3; + done; + echo "Primary node ready!"; + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + echo "Removing join.key file"; + rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/security/join.key; + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys - load from database"; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Load custom certificates from database"; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.artifactory.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + env: + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: "{{ .Values.artifactory.masterKeySecretName | default (include "artifactory-ha.fullname" .) 
}}" + key: master-key + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ default .Chart.AppVersion .Values.artifactory.image.version }} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . 
}}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + {{- with .Values.artifactory.node.preStartCommand }} + echo "Running member node specific custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo; + {{- with .Values.artifactory.postStartCommand }} + {{ tpl . 
$ }} + {{- end }} + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.node.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.node.javaOpts.jmx.port }} + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if 
.Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . 
}}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.node.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.node.affinity }} + {{- with .Values.artifactory.node.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- end }} + {{- with .Values.artifactory.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . 
}}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.node.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-primary-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-primary-statefulset.yaml new file mode 100644 index 000000000..9de929da2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-primary-statefulset.yaml @@ -0,0 +1,835 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + version: {{ default .Chart.AppVersion .Values.artifactory.image.version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory/CHANGELOG.md), pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true if you are upgrading from chart version which has postgresql version 9.6.x." .Values.databaseUpgradeReady | quote }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.primary.name" . }} + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.primary.name" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . 
| indent 8 }} + {{- end }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.access.accessConfig }} + checksum/access-config: {{ include (print $.Template.BasePath "/artifactory-access-config.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} + checksum/admin-creds: {{ include (print $.Template.BasePath "/admin-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.customInitContainersBegin }} +{{ tpl .Values.artifactory.customInitContainersBegin . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}/lost+found; + rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + subPath: {{ .Values.artifactory.admin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + {{- if .Values.access.accessConfig }} + echo "Copy access.config.latest.yml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -fv /tmp/etc/access.config.import.yml {{ .Values.artifactory.persistence.mountPath }}/etc/access/access.config.import.yml; + {{- end }} + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + touch {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/reset_ca_keys; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Copying custom certificates to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + cp -fv 
/tmp/etc/tls.crt {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.crt; + cp -fv /tmp/etc/tls.key {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.private.key; + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.artifactory.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security; + echo -n ${ARTIFACTORY_JOIN_KEY} > {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security/join.key; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.artifactory.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + {{- end }} + env: + {{- if or .Values.artifactory.joinKey .Values.artifactory.joinKeySecretName}} + - name: ARTIFACTORY_JOIN_KEY + valueFrom: + secretKeyRef: + name: "{{ .Values.artifactory.joinKeySecretName | default (include "artifactory-ha.fullname" .) }}" + key: join-key + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.artifactory.masterKeySecretName }} + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: "{{ .Values.artifactory.masterKeySecretName | default (include "artifactory-ha.fullname" .) }}" + key: master-key + {{- end }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.access.accessConfig }} + - name: access-config + mountPath: "/tmp/etc/access.config.import.yml" + subPath: access.config.import.yml + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + mountPath: "/tmp/etc/tls.crt" + subPath: tls.crt + - name: access-certs + mountPath: "/tmp/etc/tls.key" + subPath: tls.key + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . 
| indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ default .Chart.AppVersion .Values.artifactory.image.version }} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) 
$ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + if [ -d /artifactory_extra_conf ] && [ -d /artifactory_bootstrap ]; then + echo "Copying bootstrap config from /artifactory_extra_conf to /artifactory_bootstrap"; + cp -Lrfv /artifactory_extra_conf/ /artifactory_bootstrap/; + fi; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_bootstrap/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /artifactory_bootstrap/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- with .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.artifactory.primary.preStartCommand }} + echo "Running primary specific custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo; + {{- with .Values.artifactory.postStartCommand }} + {{ tpl . 
$ }} + {{- end }} + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.primary.javaOpts.jmx.port }} + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + mountPath: "/artifactory_bootstrap/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . 
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . 
| upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.primary.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.primary.affinity }} + {{- with .Values.artifactory.primary.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.artifactory.primary.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + secretName: {{ .Values.artifactory.admin.secret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . 
}}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory-ha.primary.name" . }}-system-yaml + {{- if .Values.access.accessConfig }} + - name: access-config + secret: + secretName: {{ template "artifactory-ha.fullname" . }}-access-config + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + secret: + secretName: {{ .Values.access.customCertificatesSecretName }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . | indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.primary.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-priority-class.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-priority-class.yaml new file mode 100644 index 000000000..417ec5c06 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory-ha.fullname" .) 
.Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-role.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-role.yaml new file mode 100644 index 000000000..c86bffddd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-rolebinding.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-rolebinding.yaml new file mode 100644 index 000000000..4412870b1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory-ha.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory-ha.fullname" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-secrets.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-secrets.yaml new file mode 100644 index 000000000..260e516b9 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: +{{- if and .Values.artifactory.masterKey (not .Values.artifactory.masterKeySecretName) }} + master-key: {{ .Values.artifactory.masterKey | b64enc | quote }} +{{- end }} +{{- if and .Values.artifactory.joinKey (not .Values.artifactory.joinKeySecretName) }} + join-key: {{ .Values.artifactory.joinKey | b64enc | quote }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-service.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-service.yaml new file mode 100644 index 000000000..68b467ea5 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-service.yaml @@ -0,0 +1,107 @@ +# Service for all Artifactory cluster nodes. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml .| indent 4 }} + {{- end }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + {{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: router + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: ssh + {{- end }} + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: artifactory + {{- with .Values.artifactory.node.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: +{{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} +{{- end }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} +--- +# Internal service for Artifactory primary node only! +# Used by member nodes to check readiness of primary node before starting up +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +spec: + # Statically setting service type to ClusterIP since this is an internal only service + type: ClusterIP + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: artifactory + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: ssh + {{- end }} + {{- with .Values.artifactory.primary.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: + role: {{ template "artifactory-ha.primary.name" . }} + app: {{ template "artifactory-ha.name" . 
}} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-serviceaccount.yaml new file mode 100644 index 000000000..6983c1d12 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.serviceAccountName" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-storage-pvc.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-storage-pvc.yaml new file mode 100644 index 000000000..e0bfa6b11 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-storage-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.artifactory.customPersistentVolumeClaim }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + labels: + app: {{ template "artifactory-ha.name" . }} + version: "{{ .Values.artifactory.version }}" + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + accessModes: + {{- range .Values.artifactory.customPersistentVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentVolumeClaim.size | quote }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-system-yaml.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-system-yaml.yaml new file mode 100644 index 000000000..cf8ffbac8 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/artifactory-system-yaml.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.primary.name" . }}-system-yaml + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/filebeat-configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..d2db2a067 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.name" . 
}}-filebeat-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/ingress.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/ingress.yaml new file mode 100644 index 000000000..262647f9c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/ingress.yaml @@ -0,0 +1,102 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- $ingressName := default ( include "artifactory-ha.fullname" . ) .Values.ingress.name -}} +{{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $ingressName }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- if .Values.artifactory.replicator.enabled }} +--- +{{- $replicationIngressName := default ( include "artifactory-ha.replicator.fullname" . 
) .Values.artifactory.replicator.ingress.name -}} +{{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $replicationIngressName }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.ingress.annotations }} + annotations: +{{ .Values.artifactory.replicator.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.ingress.hosts }} + {{- range $host := .Values.artifactory.replicator.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: /replicator/ + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: /artifactory/api/replication/replicate/file/streaming + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.ingress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/logger-configmap.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/logger-configmap.yaml new file mode 100644 index 000000000..87fe8999e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-logger + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! 
+ fi + sleep 1 + done + +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-artifactory-conf.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-artifactory-conf.yaml new file mode 100644 index 000000000..eb1f0e698 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-certificate-secret.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-certificate-secret.yaml new file mode 100644 index 000000000..2c1430a18 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory-ha.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-conf.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-conf.yaml new file mode 100644 index 000000000..5f424d52a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-deployment.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-deployment.yaml new file mode 100644 index 000000000..4327e7479 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-deployment.yaml @@ -0,0 +1,192 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: {{ .Values.nginx.kind }} +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: +{{- if ne .Values.nginx.kind "DaemonSet" }} + replicas: {{ .Values.nginx.replicaCount }} +{{- end }} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: '{{ .Values.nginx.image.repository }}:{{ default .Chart.AppVersion .Values.nginx.image.version }}' + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.nginx.ssh.internalPort }} + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + 
{{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + resources: +{{ toYaml $.Values.nginx.loggersResources | indent 10 }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + {{- end }} + + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory-ha.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pdb.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pdb.yaml new file mode 100644 index 000000000..8310377a6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.nginx.name }} + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + minAvailable: {{ .Values.nginx.minAvailable }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pvc.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pvc.yaml new file mode 100644 index 000000000..68a89cea0 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled) (eq (int .Values.nginx.replicaCount) 1) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClass }} +{{- if (eq "-" .Values.nginx.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-service.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-service.yaml new file mode 100644 index 000000000..d0e10e248 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/templates/nginx-service.yaml @@ -0,0 +1,79 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + {{- if .Values.nginx.service.labels }} +{{ toYaml .Values.nginx.service.labels | indent 4 }} + {{- end }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} + {{- if and (eq .Values.nginx.service.type "ClusterIP") .Values.nginx.service.clusterIP }} + clusterIP: {{ .Values.nginx.service.clusterIP }} + {{- end }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later verion + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if or .Values.nginx.https.enabled .Values.nginx.service.ssloffload }} + - port: {{ .Values.nginx.https.externalPort }} + {{- if .Values.nginx.service.ssloffload }} + targetPort: {{ .Values.nginx.http.internalPort }} + {{- else }} + targetPort: {{ .Values.nginx.https.internalPort}} + {{- end }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.nginx.ssh.externalPort }} + targetPort: {{ .Values.nginx.ssh.internalPort }} + protocol: TCP + name: ssh + {{- end }} + selector: + app: {{ template "artifactory-ha.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/values-large.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-large.yaml new file mode 100644 index 000000000..ec05d2add --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-large.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" + node: + replicaCount: 3 + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/values-medium.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-medium.yaml new file mode 100644 index 000000000..33879c00b --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-medium.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" + node: + replicaCount: 2 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/values-small.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-small.yaml new file mode 100644 index 000000000..4babf97cb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/values-small.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" + node: + replicaCount: 1 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/charts/artifactory-ha/artifactory-ha/3.0.1400/values.yaml b/charts/artifactory-ha/artifactory-ha/3.0.1400/values.yaml new file mode 100644 index 000000000..f0ff3a1c5 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/3.0.1400/values.yaml @@ -0,0 +1,1551 @@ +# Default values for artifactory-ha. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# Common +initContainerImage: docker.bintray.io/alpine:3.12 + +installer: + type: + platform: + +installerInfo: '{"productId": "Helm_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + +# For supporting pulling from private registries +imagePullSecrets: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. 
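The ingress block that begins here is normally driven from a user values override. A minimal, hedged sketch (the hostname, annotation, and TLS secret name are assumptions, and the TLS secret must be created manually in the namespace, as the chart comments note):

ingress:
  enabled: true
  hosts:
    - artifactory.example.com          # assumption: your external hostname
  annotations:
    kubernetes.io/ingress.class: nginx # assumption: an nginx ingress controller is in use
  tls:
    - secretName: artifactory-tls      # hypothetical, pre-created TLS secret
      hosts:
        - artifactory.example.com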
+ hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory-ha + + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the postgresql dependency +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 10.13.0-debian-10-r38 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlExtendedConf: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + master: + nodeSelector: {} + affinity: {} + tolerations: [] + slave: + nodeSelector: {} + affinity: {} + tolerations: [] + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## you MUST specify custom database details here or Artifactory will NOT start +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "rds-artifactory" + # key: "db-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +logger: + image: + repository: docker.bintray.io/busybox + tag: 1.31.1 + +# Artifactory +artifactory: + name: artifactory-ha + # Note that by default we use appVersion to get image tag/version + image: + repository: docker.bintray.io/jfrog/artifactory-pro + # version: + pullPolicy: IfNotPresent + + # Create a priority class for the Artifactory pods or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 200 + extraConfig: 'acceptCount="100"' + + # This directory is intended for use with NFS eventual configuration for HA + haDataDir: + enabled: false + path: + haBackupDir: + enabled: false + path: + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: 
/artifactory_bootstrap/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + # # Absolute path + # - source: /artifactory_bootstrap/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - access-audit.log + # - access-request.log + # - access-security-audit.log + # - access-service.log + # - artifactory-access.log + # - artifactory-event.log + # - artifactory-import-export.log + # - artifactory-request.log + # - artifactory-service.log + # - frontend-request.log + # - frontend-service.log + # - metadata-request.log + # - metadata-service.log + # - router-request.log + # - router-service.log + # - router-traefik.log + # - derby.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - tomcat-catalina.log + # - tomcat-localhost.log + + # Tomcat (catalina) loggers resources + catalinaLoggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 + ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + ## Add custom init containers + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # securityContext: + # runAsUser: 0 + # fsGroup: 0 + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: 
custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key! + ## You can generate one with the command: "openssl rand -hex 32" + ## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}' + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! + ## IMPORTANT: This is a mandatory for fresh Install of 7.x (App version) + # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + # masterKeySecretName: + + ## Join Key to connect to other services to Artifactory. + ## IMPORTANT: Setting this value overrides the existing joinKey + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + + # Add custom secrets - secret per file + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + + binarystore: + enabled: true + + ## admin allows to set the password for the default admin user. + ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate + admin: + ip: "127.0.0.1" + username: "admin" + password: + secret: + dataKey: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. + dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . 
}}' + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. + ## Uncomment and set value as needed + extraEnvironmentVariables: + # - name: SERVER_XML_ARTIFACTORY_PORT + # value: "8081" + # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS + # value: "200" + # - name: SERVER_XML_ACCESS_MAX_THREADS + # value: "50" + # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_ACCESS_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_EXTRA_CONNECTOR + # value: "" + # - name: DB_POOL_MAX_ACTIVE + # value: "100" + # - name: DB_POOL_MAX_IDLE + # value: "10" + # - name: MY_SECRET_ENV_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret-name + # key: my-secret-key + + # TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes) + systemYaml: | + shared: + logging: + consoleLog: + enabled: {{ .Values.artifactory.consoleLog }} + extraJavaOpts: > + -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }} + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}" + host: "" + driver: org.postgresql.Driver + username: "{{ .Values.postgresql.postgresqlUsername }}" + {{ else }} + type: "{{ .Values.database.type }}" + driver: "{{ .Values.database.driver }}" + {{- end }} + artifactory: + {{- if or .Values.artifactory.haDataDir.enabled .Values.artifactory.haBackupDir.enabled }} + node: + {{- if .Values.artifactory.haDataDir.path }} + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + {{- if .Values.artifactory.haBackupDir.path }} + haBackupDir: {{ .Values.artifactory.haBackupDir.path }} + {{- end }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }} + access: + 
database: + maxOpenConnections: {{ .Values.access.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.access.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.access.tomcat.connector.extraConfig }} + {{- if .Values.access.database.enabled }} + type: "{{ .Values.access.database.type }}" + url: "{{ .Values.access.database.url }}" + driver: "{{ .Values.access.database.driver }}" + username: "{{ .Values.access.database.user }}" + password: "{{ .Values.access.database.password }}" + {{- end }} + metadata: + database: + maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }} + {{- if .Values.artifactory.replicator.enabled }} + replicator: + enabled: true + {{- end }} + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + terminationGracePeriodSeconds: 30 + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 90 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + enabled: true + local: false + redundancy: 3 + mountPath: "/var/opt/jfrog/artifactory" + accessMode: ReadWriteOnce + size: 200Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + + maxCacheSize: 50000000000 + cacheProviderDir: cache + eventual: + numberOfThreads: 10 + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## aws-s3-v3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. 
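As the comments above describe, the filestore layout is selected with artifactory.persistence.type, or the generated binarystoreXml below can be bypassed entirely with a pre-created Secret. A hedged sketch of such a values override (the Secret name is an assumption):

artifactory:
  persistence:
    ## Supported built-in layouts: file-system (default), nfs, google-storage,
    ## aws-s3, aws-s3-v3, azure-blob
    type: file-system
    ## Or hand the chart a pre-created Secret holding your own binarystore.xml:
    # customBinarystoreXmlSecret: my-binarystore-xml   # hypothetical Secret name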
+ binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" }} + + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + + + + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + + {{- end }} + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + // Specify the read and write strategy and redundancy for the sharding binary provider + + roundRobin + percentageFreeSpace + 2 + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + //For each sub-provider (mount), specify the filestore location + + filestore{{ $sharedClaimNumber }} + + {{- end }} + + {{- else }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + 2 + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + shard-fs-1 + local + + + + + 30 + tester-remote1 + 10000 + remote + + + + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .maxConnections }} + {{ . }} + {{- end }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + {{- if .useInstanceCredentials }} + true + {{- else }} + false + {{- end }} + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . 
}} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + crossNetworkStrategy + crossNetworkStrategy + 2 + 1 + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type file-system + fileSystem: + ## You may also use existing shared claims for the data and backup storage. This allows storage (NAS for example) to be used for Data and Backup dirs which are safe to share across multiple artifactory nodes. + ## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1) + ## Create PVCs with ReadWriteMany that match the naming convetions: + ## {{ template "artifactory-ha.fullname" . }}-data-pvc- + ## {{ template "artifactory-ha.fullname" . }}-backup-pvc + ## Example (using numberOfExistingClaims: 2) + ## myexample-data-pvc-0 + ## myexample-data-pvc-1 + ## myexample-backup-pvc + ## Note: While you need two PVC fronting two PVs, multiple PVs can be attached to the same storage in many cases allowing you to share an underlying drive. + + ## Need to have the following set + existingSharedClaim: + enabled: false + numberOfExistingClaims: 1 + ## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }} + dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data" + backupDir: "/var/opt/jfrog/artifactory-backup" + + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. 
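For the nfs layout described here, the chart expects the address of that existing NFS export in the nfs block that follows. A hedged sketch of a matching values override (the IP is an assumption; mounts and capacity mirror the chart defaults):

artifactory:
  persistence:
    type: nfs
    nfs:
      ip: 10.0.0.50            # assumption: address of an existing NFS server
      haDataMount: "/data"
      haBackupMount: "/backup"
      capacity: 200Gi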
+ ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory-ha" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + mountOptions: [] + ## For artifactory.persistence.type google-storage + googleStorage: + ## When using GCP buckets as your binary store (Available with enterprise license only) + gcpServiceAccount: + enabled: false + ## Use either an existing secret prepared in advance or put the config (replace the content) in the values + ## ref: https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#google-storage + # customSecretName: + # config: | + # { + # "type": "service_account", + # "project_id": "", + # "private_key_id": "?????", + # "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + # "client_email": "???@j.iam.gserviceaccount.com", + # "client_id": "???????", + # "auth_uri": "https://accounts.google.com/o/oauth2/auth", + # "token_uri": "https://oauth2.googleapis.com/token", + # "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + # "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." + # } + endpoint: storage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-ha-gcp" + identity: + credential: + path: "artifactory-ha/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + maxConnections: 50 + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-ha-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory-ha/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: "AWS4-HMAC-SHA256" + + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + testConnection: false + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them) + ## Supported pool values + ## members + ## all + pool: members + + ## The following Java options are passed to the java process running Artifactory. + ## This will be passed to all cluster members. Primary and member nodes.
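A minimal sketch of how the persistence types documented above might be selected from a user-supplied override file (for example s3-values.yaml passed with helm upgrade --install ... -f s3-values.yaml); the region and bucket name are placeholders, and useInstanceCredentials: true assumes the nodes or pods carry an IAM role with access to the bucket:

artifactory:
  persistence:
    type: aws-s3-v3                         # one of: file-system, nfs, google-storage, aws-s3-v3, aws-s3, azure-blob
    awsS3V3:
      region: us-east-1                     # placeholder
      bucketName: my-artifactory-filestore  # placeholder
      path: artifactory/filestore
      useInstanceCredentials: true          # assumption: IAM role attached; otherwise set identity/credential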
+ javaOpts: {} + # other: "" + + ## The following setting are to configure a dedicated Ingress object for Replicator service + replicator: + enabled: false + ingress: + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + + ssh: + enabled: false + internalPort: 1339 + externalPort: 1339 + + annotations: {} + + ## Type specific configurations. + ## There is a difference between the primary and the member nodes. + ## Customising their resources and java parameters is done here. + primary: + name: artifactory-ha-primary + # preStartCommand specific to the primary node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0` + existingClaim: false + ## Resources for the primary node + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory primary node. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + nodeSelector: {} + + tolerations: [] + + affinity: {} + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + + node: + name: artifactory-ha-member + # preStartCommand specific to the member node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0` + existingClaim: false + replicaCount: 2 + minAvailable: 1 + ## Resources for the member nodes + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory member nodes. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + # xms: "1g" + # xmx: "2g" + # other: "" + nodeSelector: {} + + ## Wait for Artifactory primary + waitForPrimaryStartup: + enabled: true + + ## Setting time will override the built in test and will just wait the set time + time: + + tolerations: [] + + ## Complete specification of the "affinity" of the member nodes; if this is non-empty, + ## "podAntiAffinity" values are not used. 
+ affinity: {} + + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + +access: + ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. + ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates + ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes. + ## This ensures that the node to node communication is done over TLS. + accessConfig: + security: + tls: false + + ## You can use a pre-existing secret by specifying customCertificatesSecretName + ## Example : Create a tls secret using `kubectl create secret tls --cert=ca.crt --key=ca.private.key` + # customCertificatesSecretName: + + ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key + # resetAccessCAKeys: false + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 50 + extraConfig: 'acceptCount="100"' + +metadata: + database: + maxOpenConnections: 80 + +# Init containers +initContainers: + resources: {} +# requests: +# memory: "64Mi" +# cpu: "10m" +# limits: +# memory: "128Mi" +# cpu: "250m" + + +# Nginx +nginx: + enabled: true + kind: Deployment + name: nginx + labels: {} + replicaCount: 1 + minAvailable: 0 + uid: 104 + gid: 107 + # Note that by default we use appVersion to get image tag/version + image: + repository: docker.bintray.io/jfrog/nginx-artifactory-pro + # version: + pullPolicy: IfNotPresent + + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + + # Logs options + logs: + stderr: false + level: warn + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + + {{ if .Values.nginx.logs.stderr }} + error_log stderr {{ .Values.nginx.logs.level }}; + {{- else -}} + error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }}; + {{- end }} + pid /tmp/nginx.pid; + + {{- if .Values.artifactory.ssh.enabled }} + ## SSH Server Configuration + stream { + server { + listen {{ .Values.nginx.ssh.internalPort }}; + proxy_pass {{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.ssh.externalPort }}; + } + } + {{- end }} + + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 128k; + proxy_buffers 40 128k; + proxy_busy_buffers_size 128k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + + artifactoryConf: | + {{- if .Values.nginx.https.enabled }} + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + {{- end }} + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?<repo>.+)\.{{ . }} + {{- end -}} + {{- end -}}; + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" .
}}:{{ .Values.artifactory.externalPort }}/; + {{- if .Values.nginx.service.ssloffload}} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host; + {{- else }} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + {{- end }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + ssloffload: false + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. + externalTrafficPolicy: Cluster + labels: {} + # label-key: label-value + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + # DEPRECATED: The following will be replaced by L1065-L1076 in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ssh: + internalPort: 1339 + externalPort: 1339 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 120 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. It assumes you have a logstash installed and configured properly. +filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.5.1 + logstashUrl: "logstash:5044" + + terminationGracePeriod: 10 + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/.helmignore b/charts/artifactory-ha/artifactory-ha/4.13.000/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/CHANGELOG.md b/charts/artifactory-ha/artifactory-ha/4.13.000/CHANGELOG.md new file mode 100644 index 000000000..8d287d283 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/CHANGELOG.md @@ -0,0 +1,1048 @@ +# JFrog Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [4.13.0] - April 5, 2021 +* **IMPORTANT** +* Added `charts.jfrog.io` as default JFrog Helm repository +* Updated Artifactory version to 7.17.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.17.5) + +## [4.12.2] - Mar 31, 2021 +* Updated Artifactory version to 7.17.4 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.17.4) + +## [4.12.1] - Mar 30, 2021 +* Updated Artifactory version to 7.17.3 +* Add `timeoutSeconds` to all exec probes - Please refer [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + +## [4.12.0] - Mar 24, 2021 +* Updated Artifactory version to 7.17.2 +* Optimized startupProbe time + +## [4.11.0] - Mar 18, 2021 +* Add support to startupProbe + +## [4.10.0] - Mar 15, 2021 +* Updated Artifactory version to 7.16.3 + +## [4.9.5] - Mar 09, 2021 +* Added HSTS header to nginx conf + +## [4.9.4] - Mar 9, 2021 +* Removed bintray URL references in the chart + +## [4.9.3] - Mar 04, 2021 +* Updated Artifactory version to 7.15.4 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.15.4) + +## [4.9.2] - Mar 04, 2021 +* Fixed creation of nginx-certificate-secret when Nginx is disabled + +## [4.9.1] - Feb 19, 2021 +* Update busybox tag version to `1.32.1` + +## [4.9.0] - Feb 18, 2021 +* Updated Artifactory version to 7.15.3 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.15.3) +* Add option to specify update strategy for Artifactory statefulset + +## [4.8.1] - Feb 11, 2021 +* Exposed "multiPartLimit" and "multipartElementSize" for the Azure Blob Storage Binary Provider + +## [4.8.0] - Feb 08, 2021 +* Updated Artifactory version to 7.12.8 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.12.8) +* Support for custom certificates using secrets +* **Important:** Switched docker images download from `docker.bintray.io` to `releases-docker.jfrog.io` +* Update alpine tag version to `3.13.1` + +## [4.7.9] - Feb 3, 2021 +* Fix copyOnEveryStartup for HA cluster license + +## [4.7.8] - Jan 25, 2021 +* Add support for hostAliases + +## [4.7.7] - Jan 11, 2021 +* Fix failures when using creds file for configurating google storage + +## [4.7.6] - Jan 11, 2021 +* Updated Artifactory version to 7.12.6 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.12.6) + +## [4.7.5] - Jan 07, 2021 +* Added support for optional tracker dedicated ingress `.Values.artifactory.replicator.trackerIngress.enabled` (defaults to false) + +## [4.7.4] - Jan 04, 2021 +* Fixed gid support for statefulset + +## [4.7.3] - Dec 31, 2020 +* Added gid support for statefulset +* Add setSecurityContext flag to allow securityContext block to be removed from artifactory statefulset + +## [4.7.2] - Dec 29, 2020 +* **Important:** Removed `.Values.metrics` and `.Values.fluentd` (Fluentd and Prometheus integrations) +* Add support for creating additional kubernetes resources - [refer here](https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-ha-values.yaml) +* Updated Artifactory version to 7.12.5 + +## [4.7.1] - Dec 21, 2020 +* Updated Artifactory version to 7.12.3 + +## [4.7.0] - 
Dec 18, 2020 +* Updated Artifactory version to 7.12.2 +* Added `.Values.artifactory.openMetrics.enabled` + +## [4.6.1] - Dec 11, 2020 +* Added configurable `.Values.global.versions.artifactory` in values.yaml + +## [4.6.0] - Dec 10, 2020 +* Update postgresql tag version to `12.5.0-debian-10-r25` +* Fixed `artifactory.persistence.googleStorage.endpoint` from `storage.googleapis.com` to `commondatastorage.googleapis.com` +* Updated chart maintainers email + +## [4.5.5] - Dec 4, 2020 +* **Important:** Renamed `.Values.systemYaml` to `.Values.systemYamlOverride` + +## [4.5.4] - Dec 1, 2020 +* Improve error message returned when attempting helm upgrade command + +## [4.5.3] - Nov 30, 2020 +* Updated Artifactory version to 7.11.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) + +# [4.5.2] - Nov 23, 2020 +* Updated Artifactory version to 7.11.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) +* Updated port namings on services and pods to allow for istio protocol discovery +* Change semverCompare checks to support hosted Kubernetes +* Add flag to disable creation of ServiceMonitor when enabling prometheus metrics +* Prevent the PostHook command to be executed if the user did not specify a command in the values file +* Fix issue with tls file generation when nginx.https.enabled is false + +## [4.5.1] - Nov 19, 2020 +* Updated Artifactory version to 7.11.2 +* Bugfix - access.config.import.xml override Access Federation configurations + +## [4.5.0] - Nov 17, 2020 +* Updated Artifactory version to 7.11.1 +* Update alpine tag version to `3.12.1` + +## [4.4.6] - Nov 10, 2020 +* Pass system.yaml via external secret for advanced usecases +* Added support for custom ingress +* Bugfix - stateful set not picking up changes to database secrets + +## [4.4.5] - Nov 9, 2020 +* Updated Artifactory version to 7.10.6 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.6) + +## [4.4.4] - Nov 2, 2020 +* Add enablePathStyleAccess property for aws-s3-v3 binary provider template + +## [4.4.3] - Nov 2, 2020 +* Updated Artifactory version to 7.10.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.5) + +## [4.4.2] - Oct 22, 2020 +* Chown bug fix where Linux capability cannot chown all files causing log line warnings +* Fix Frontend timeout linting issue + +## [4.4.1] - Oct 20, 2020 +* Add flag to disable prepare-custom-persistent-volume init container + +## [4.4.0] - Oct 19, 2020 +* Updated Artifactory version to 7.10.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.2) + +## [4.3.4] - Oct 19, 2020 +* Add support to specify priorityClassName for nginx deployment + +## [4.3.3] - Oct 15, 2020 +* Fixed issue with node PodDisruptionBudget which also getting applied on the primary +* Fix mandatory masterKey check issue when upgrading from 6.x to 7.x + +## [4.3.2] - Oct 14, 2020 +* Add support to allow more than 1 Primary in Artifactory-ha STS + +## [4.3.1] - Oct 9, 2020 +* Add global support for customInitContainersBegin + +## [4.3.0] - Oct 07, 2020 +* Updated Artifactory version to 7.9.1 +* **Breaking change:** Fix `storageClass` to correct `storageClassName` in values.yaml + +## [4.2.0] - Oct 5, 2020 +* Expose 
Prometheus metrics via a ServiceMonitor +* Parse log files for metric data with Fluentd + +## [4.1.0] - Sep 30, 2020 +* Updated Artifactory version to 7.9.0 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.9) + +## [4.0.12] - Sep 25, 2020 +* Update to use linux capability CAP_CHOWN instead of root base init container to avoid any use of root containers to pass Redhat security requirements + +## [4.0.11] - Sep 28, 2020 +* Setting chart coordinates in migitation yaml + +## [4.0.10] - Sep 25, 2020 +* Update filebeat version to `7.9.2` + +## [4.0.9] - Sep 24, 2020 +* Fixed broken issue - when setting `waitForDatabase:false` container startup still waits for DB + +## [4.0.8] - Sep 22, 2020 +* Updated readme + +## [4.0.7] - Sep 22, 2020 +* Fix lint issue in migitation yaml + +## [4.0.6] - Sep 22, 2020 +* Fix broken migitation yaml + +## [4.0.5] - Sep 21, 2020 +* Added mitigation yaml for Artifactory - [More info](https://github.com/jfrog/chartcenter/blob/master/docs/securitymitigationspec.md) + +## [4.0.4] - Sep 17, 2020 +* Added configurable session(UI) timeout in frontend microservice + +## [4.0.3] - Sep 17, 2020 +* Fix small typo in README and added proper required text to be shown while postgres upgrades + +## [4.0.2] - Sep 14, 2020 +* Updated Artifactory version to 7.7.8 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7.8) + +## [4.0.1] - Sep 8, 2020 +* Added support for artifactory pro license (single node) installation. + +## [4.0.0] - Sep 2, 2020 +* **Breaking change:** Changed `imagePullSecrets` value from string to list +* **Breaking change:** Added `image.registry` and changed `image.version` to `image.tag` for docker images +* Added support for global values +* Updated maintainers in chart.yaml +* Update postgresql tag version to `12.3.0-debian-10-r71` +* Update postgresqlsub chart version to `9.3.4` - [9.x Upgrade Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#900) +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass previous 9.x/10.x's postgresql.image.tag and databaseUpgradeReady=true. + +## [3.1.0] - Aug 13, 2020 +* Updated Artifactory version to 7.7.3 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7) + +## [3.0.15] - Aug 10, 2020 +* Added enableSignedUrlRedirect for persistent storage type aws-s3-v3. + +## [3.0.14] - Jul 31, 2020 +* Update the README section on Nginx SSL termination to reflect the actual YAML structure. + +## [3.0.13] - Jul 30, 2020 +* Added condition to disable the migration scripts. + +## [3.0.12] - Jul 29, 2020 +* Document Artifactory node affinity. + +## [3.0.11] - Jul 28, 2020 +* Added maxConnections for persistent storage type aws-s3-v3. + +## [3.0.10] - Jul 28, 2020 +Bugfix / support for userPluginSecrets with Artifactory 7 + +## [3.0.9] - Jul 27, 2020 +* Add tpl to external database secrets. +* Modified `scheme` to `artifactory-ha.scheme` + +## [3.0.8] - Jul 23, 2020 +* Added condition to disable the migration init container. + +## [3.0.7] - Jul 21, 2020 +* Updated Artifactory-ha Chart to add node and primary labels to pods and service objects. 
+ +## [3.0.6] - Jul 20, 2020 +* Support custom CA and certificates + +## [3.0.5] - Jul 13, 2020 +* Updated Artifactory version to 7.6.3 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.3 +* Fixed Mysql database jar path in `preStartCommand` in README + +## [3.0.4] - Jul 8, 2020 +* Move some postgresql values to where they should be according to the subchart + +## [3.0.3] - Jul 8, 2020 +* Set Artifactory access client connections to the same value as the access threads. + +## [3.0.2] - Jul 6, 2020 +* Updated Artifactory version to 7.6.2 +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [3.0.1] - Jul 01, 2020 +* Add dedicated ingress object for Replicator service when enabled + +## [3.0.0] - Jun 30, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update busybox tag version to `1.31.1` +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [2.6.0] - Jun 29, 2020 +* Updated Artifactory version to 7.6.1 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.1 +* Add tpl for external database secrets + +## [2.5.8] - Jun 25, 2020 +* Stop loading the Nginx stream module because it is now a core module + +## [2.5.7] - Jun 18, 2020 +* Fixes bootstrap configMap issue on member node + +## [2.5.6] - Jun 11, 2020 +* Support list of custom secrets + +## [2.5.5] - Jun 11, 2020 +* NOTES.txt fixed incorrect information + +## [2.5.4] - Jun 12, 2020 +* Updated Artifactory version to 7.5.7 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5.7 + +## [2.5.3] - Jun 8, 2020 +* Statically setting primary service type to ClusterIP. +* Prevents primary service from being exposed publicly when using LoadBalancer type on cloud providers. + +## [2.5.2] - Jun 8, 2020 +* Readme update - configuring Artifactory with oracledb + +## [2.5.1] - Jun 5, 2020 +* Fixes broken PDB issue upgrading from 6.x to 7.x + +## [2.5.0] - Jun 1, 2020 +* Updated Artifactory version to 7.5.5 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5 +* Fixes bootstrap configMap permission issue +* Update postgresql tag version to `9.6.18-debian-10-r7` + +## [2.4.10] - May 27, 2020 +* Added Tomcat maxThreads & acceptCount + +## [2.4.9] - May 25, 2020 +* Fixed postgresql README `image` Parameters + +## [2.4.8] - May 24, 2020 +* Fixed typo in README regarding migration timeout + +## [2.4.7] - May 19, 2020 +* Added metadata maxOpenConnections + +## [2.4.6] - May 07, 2020 +* Fix `installerInfo` string format + +## [2.4.5] - Apr 27, 2020 +* Updated Artifactory version to 7.4.3 + +## [2.4.4] - Apr 27, 2020 +* Change customInitContainers order to run before the "migration-ha-artifactory" initContainer + +## [2.4.3] - Apr 24, 2020 +* Fix `artifactory.persistence.awsS3V3.useInstanceCredentials` incorrect conditional logic +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [2.4.2] - Apr 16, 2020 +* Custom volume mounts in migration init container. 
+ +## [2.4.1] - Apr 16, 2020 +* Fix broken support for gcpServiceAccount for googleStorage + +## [2.4.0] - Apr 14, 2020 +* Updated Artifactory version to 7.4.1 + +## [2.3.1] - April 13, 2020 +* Update README with helm v3 commands + +## [2.3.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml +* Bump postgresql tag version to `9.6.17-debian-10-r21` in values.yaml + +## [2.2.11] - Apr 8, 2020 +* Added recommended ingress annotation to avoid 413 errors + +## [2.2.10] - Apr 8, 2020 +* Moved migration scripts under `files` directory +* Support preStartCommand in migration Init container as `artifactory.migration.preStartCommand` + +## [2.2.9] - Apr 01, 2020 +* Support masterKey and joinKey as secrets + +## [2.2.8] - Apr 01, 2020 +* Ensure that the join key is also copied when provided by an external secret +* Migration container in primary and node statefulset now respects custom versions and the specified node/primary resources + +## [2.2.7] - Apr 01, 2020 +* Added cache-layer in chain definition of Google Cloud Storage template +* Fix readme use to `-hex 32` instead of `-hex 16` + +## [2.2.6] - Mar 31, 2020 +* Change the way the artifactory `command:` is set so it will properly pass a SIGTERM to java + +## [2.2.5] - Mar 31, 2020 +* Removed duplicate `artifactory-license` volume from primary node + +## [2.2.4] - Mar 31, 2020 +* Restore `artifactory-license` volume for the primary node + +## [2.2.3] - Mar 29, 2020 +* Add Nginx log options: stderr as logfile and log level + +## [2.2.2] - Mar 30, 2020 +* Apply initContainers.resources to `copy-system-yaml`, `prepare-custom-persistent-volume`, and `migration-artifactory-ha` containers +* Use the same defaulting mechanism used for the artifactory version used elsewhere in the chart +* Removed duplicate `artifactory-license` volume that prevented using an external secret + +## [2.2.1] - Mar 29, 2020 +* Fix loggers sidecars configurations to support new file system layout and new log names + +## [2.2.0] - Mar 29, 2020 +* Fix broken admin user bootstrap configuration +* **Breaking change:** renamed `artifactory.accessAdmin` to `artifactory.admin` + +## [2.1.3] - Mar 24, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [2.1.2] - Mar 21, 2020 +* Support for SSL offload in Nginx service(LoadBalancer) layer. Introduced `nginx.service.ssloffload` field with boolean type. + +## [2.1.1] - Mar 23, 2020 +* Moved installer info to values.yaml so it is fully customizable + +## [2.1.0] - Mar 23, 2020 +* Updated Artifactory version to 7.3.2 + +## [2.0.36] - Mar 20, 2020 +* Add support GCP credentials.json authentication + +## [2.0.35] - Mar 20, 2020 +* Add support for masterKey trim during 6.x to 7.x migration if 6.x masterKey is 32 hex (64 characters) + +## [2.0.34] - Mar 19, 2020 +* Add support for NFS directories `haBackupDir` and `haDataDir` + +## [2.0.33] - Mar 18, 2020 +* Increased Nginx proxy_buffers size + +## [2.0.32] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files +* useInstanceCredentials variable was declared in S3 settings but not used in chart. Now it is being used. 
+ +## [2.0.31] - Mar 17, 2020 +* Fix rendering of Service Account annotations + +## [2.0.30] - Mar 16, 2020 +* Add Unsupported message from 6.18 to 7.2.x (migration) + +## [2.0.29] - Mar 11, 2020 +* Upgrade Docs update + +## [2.0.28] - Mar 11, 2020 +* Unified charts public release + +## [2.0.27] - Mar 8, 2020 +* Add an optional wait for primary node to be ready with a proper test for http status + +## [2.0.23] - Mar 6, 2020 +* Fix path to `/artifactory_bootstrap` +* Add support for controlling the name of the ingress and allow to set more than one cname + +## [2.0.22] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [2.0.21] - Feb 28, 2020 +* Add support to process `valueFrom` for extraEnvironmentVariables + +## [2.0.20] - Feb 26, 2020 +* Store join key to secret + +## [2.0.19] - Feb 26, 2020 +* Updated Artifactory version to 7.2.1 + +## [2.0.12] - Feb 07, 2020 +* Remove protection flag `databaseUpgradeReady` which was added to check internal postgres upgrade + +## [2.0.0] - Feb 07, 2020 +* Updated Artifactory version to 7.0.0 + +## [1.4.10] - Feb 13, 2020 +* Add support for SSH authentication to Artifactory + +## [1.4.9] - Feb 10, 2020 +* Fix custom DB password indention + +## [1.4.8] - Feb 9, 2020 +* Add support for `tpl` in the `postStartCommand` + +## [1.4.7] - Feb 4, 2020 +* Support customisable Nginx kind + +## [1.4.6] - Feb 2, 2020 +* Add a comment stating that it is recommended to use an external PostgreSQL with a static password for production installations + +## [1.4.5] - Feb 2, 2020 +* Add support for primary or member node specific preStartCommand + +## [1.4.4] - Jan 30, 2020 +* Add the option to configure resources for the logger containers + +## [1.4.3] - Jan 26, 2020 +* Improve `database.user` and `database.password` logic in order to support more use cases and make the configuration less repetitive + +## [1.4.2] - Jan 22, 2020 +* Refined pod disruption budgets to separate nginx and Artifactory pods + +## [1.4.1] - Jan 19, 2020 +* Fix replicator port config in nginx replicator configmap + +## [1.4.0] - Jan 19, 2020 +* Updated Artifactory version to 6.17.0 + +## [1.3.8] - Jan 16, 2020 +* Added example for external nginx-ingress + +## [1.3.7] - Jan 07, 2020 +* Add support for customizable `mountOptions` of NFS PVs + +## [1.3.6] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [1.3.5] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [1.3.4] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.3] - Dec 16, 2019 +* Another fix for toggling nginx service ports + +## [1.3.2] - Dec 12, 2019 +* Fix for toggling nginx service ports + +## [1.3.1] - Dec 10, 2019 +* Add support for toggling nginx service ports + +## [1.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [1.2.4] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [1.2.3] - Nov 27, 2019 +* Add support for PriorityClass + +## [1.2.2] - Nov 20, 2019 +* Update Artifactory logo + +## [1.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [1.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [1.1.12] - Nov 17, 2019 +* Fix `README.md` format (broken table) + +## [1.1.11] - Nov 17, 2019 +* Update comment on Artifactory master key + +## [1.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## 
[1.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [1.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [1.1.7] - Nov 11, 2019 +* Additional documentation for masterKey + +## [1.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [1.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [1.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [1.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [1.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [1.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [1.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [1.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [1.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [0.17.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [0.17.2] - Oct 21, 2019 +* Add support for setting `artifactory.primary.labels` +* Add support for setting `artifactory.node.labels` +* Add support for setting `nginx.labels` + +## [0.17.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [0.17.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [0.16.7] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [0.16.6] - Sep 24, 2019 +* Add support for setting `nginx.service.labels` + +## [0.16.5] - Sep 23, 2019 +* Add support for setting `artifactory.customInitContainersBegin` + +## [0.16.4] - Sep 20, 2019 +* Add support for setting `initContainers.resources` + +## [0.16.3] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [0.16.2] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [0.16.1] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [0.16.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [0.15.15] - Aug 18, 2019 +* Fix existingSharedClaim permissions issue and example + +## [0.15.14] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [0.15.13] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [0.15.12] - Aug 6, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [0.15.11] - Aug 5, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [0.15.10] - Aug 5, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [0.15.9] - Aug 1, 2019 +* Fix masterkey/masterKeySecretName not specified warning render logic in NOTES.txt + +## [0.15.8] - Jul 28, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [0.15.7] - Jul 25, 2019 +* Updated README about how to apply Artifactory licenses + +## [0.15.6] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [0.15.5] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [0.15.4] - Jul 11, 2019 +* Add `artifactory.customVolumeMounts` support to member node statefulset template + +## [0.15.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [0.15.2] - Jul 3, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [0.15.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [0.15.0] - Jun 27, 2019 +* Updated Artifactory version to 6.11.0 and Restart Primary node when bootstrap.creds file has been modified in artifactory-ha + +## [0.14.4] - Jun 24, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [0.14.3] - Jun 24, 2019 +* Update chart maintainers + +## [0.14.2] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [0.14.1] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [0.14.0] - Jun 20, 2019 +* Use ConfigMaps for nginx configuration and remove nginx postStart command + +## [0.13.10] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [0.13.9] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [0.13.8] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [0.13.7] - Jun 6, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [0.13.6] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [0.13.5] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [0.13.4] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [0.13.3] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [0.13.2] - May 19, 2019 +* Fix missing logger image tag + +## [0.13.1] - May 15, 2019 +* Support `artifactory.persistence.cacheProviderDir` for on-premise cluster + +## [0.13.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [0.12.23] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [0.12.22] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [0.12.21] - Apr 30, 2019 +* Add support for JMX monitoring + +## [0.12.20] - Apr29, 2019 +* Added support for headless services + +## [0.12.19] - Apr 28, 2019 +* Added support for `cacheProviderDir` + +## [0.12.18] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [0.12.17] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [0.12.16] - Apr 12, 2019 +* Added support for `customVolumeMounts` + +## [0.12.15] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [0.12.14] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [0.12.13] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a 
parameter + +## [0.12.12] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [0.12.11] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [0.12.10] - Aprl 07, 2019 +* Change network policy API group + +## [0.12.9] - Aprl 04, 2019 +* Apply the existing PVC for members (in addition to primary) + +## [0.12.8] - Aprl 03, 2019 +* Bugfix for userPluginSecrets + +## [0.12.7] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [0.12.6] - Aprl 03, 2019 +* Added installer info + +## [0.12.5] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [0.12.4] - Apr 02, 2019 +* Fix issue #253 (use existing PVC for data and backup storage) + +## [0.12.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [0.12.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [0.12.1] - Mar 26, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [0.12.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [0.11.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [0.11.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [0.11.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [0.11.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [0.11.14] - Mar 21, 2019 +* Make ingress path configurable + +## [0.11.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart for Primary + +## [0.11.12] - Mar 19, 2019 +* Fix existingClaim example + +## [0.11.11] - Mar 18, 2019 +* Disable the option to use nginx PVC with more than one replica + +## [0.11.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [0.11.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [0.11.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [0.11.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 + +## [0.11.6] - Mar 13, 2019 +* Move securityContext to container level + +## [0.11.5] - Mar 11, 2019 +* Add the option to use existing volume claims for Artifactory storage + +## [0.11.4] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [0.11.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [0.11.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [0.11.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [0.11.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [0.10.3] - Feb 21, 2019 +* Add s3AwsVersion option to awsS3 configuration for use with IAM roles + +## [0.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [0.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [0.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [0.9.7] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [0.9.6] - Feb 7, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [0.9.5] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.4] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.3] - Feb 5, 2019 +* Remove the inactive server 
remove plugin + +## [0.9.2] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [0.9.1] - Jan 27, 2019 +* Fix support for Azure Blob Storage Binary provider + +## [0.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [0.8.10] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [0.8.9] - Jan 18, 2019 +* Added support of values ingress.labels + +## [0.8.8] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [0.8.7] - Jan 15, 2018 +* Add support for Azure Blob Storage Binary provider + +## [0.8.6] - Jan 13, 2019 +* Fix documentation about nginx group id + +## [0.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [0.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [0.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [0.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [0.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [0.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [0.7.17] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [0.7.16] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [0.7.15] - Dec 9, 2018 +* AWS S3 add `roleName` for using IAM role + +## [0.7.14] - Dec 6, 2018 +* AWS S3 `identity` and `credential` are now added only if have a value to allow using IAM role + +## [0.7.13] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [0.7.12] - Dec 2, 2018 +* Remove Java option "-Dartifactory.locking.provider.type=db". This is already the default setting. 
+ +## [0.7.11] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [0.7.10] - Nov 29, 2018 +* Fixed the volumeMount for the replicator.yaml + +## [0.7.9] - Nov 29, 2018 +* Optionally include primary node into poddisruptionbudget + +## [0.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [0.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [0.7.6] - Nov 18, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [0.7.5] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [0.7.4] - Nov 13, 2018 +* Allow pod anti-affinity settings to include primary node + +## [0.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [0.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [0.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [0.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.9] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [0.6.8] - Oct 22, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [0.6.7] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [0.6.6] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [0.6.5] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow `soft` or `hard` specification of member node anti-affinity +* Allow providing pre-existing secrets containing external database credentials +* Fix `s3` binary store provider to properly use the `cache-fs` provider +* Allow arbitrary properties when using the `s3` binary store provider + +## [0.6.4] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 14, 2018 +* Make S3 endpoint configurable (was hardcoded with `s3.amazonaws.com`) + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [0.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [0.5.3] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [0.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [0.4.7] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [0.4.6] - Sep 25, 2018 +* Add PodDisruptionBudget for member nodes, defaulting to minAvailable of 1 + +## [0.4.4] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 + +## [0.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [0.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/Chart.yaml new file mode 100644 index 000000000..c263ba2f5 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/Chart.yaml @@ -0,0 +1,19 @@ +annotations: + catalog.cattle.io/certified: 
partner + catalog.cattle.io/release-name: artifactory-ha +apiVersion: v1 +appVersion: 7.17.5 +description: Universal Repository Manager supporting all major packaging formats, build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: installers@jfrog.com + name: Chart Maintainers at JFrog +name: artifactory-ha +sources: +- https://github.com/jfrog/charts +version: 4.13.000 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/LICENSE b/charts/artifactory-ha/artifactory-ha/4.13.000/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/README.md new file mode 100644 index 000000000..4cbe9ff29 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/README.md @@ -0,0 +1,1267 @@ +# JFrog Artifactory High Availability Helm Chart + +**Heads up: Our Helm Chart docs are moving to our main documentation site. For Artifactory installers, see [Installing Artifactory](https://www.jfrog.com/confluence/display/JFROG/Installing+Artifactory).** + +## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database **NOTE:** For production grade installations it is recommended to use an external PostgreSQL +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. +This can be controlled by the parameter `artifactory.service.pool`. +**NOTE:** + Using artifactory pro license (which supports single node only), set `artifactory.node.replicaCount=0` in values.yaml. + To scale from single node to multiple nodes(>1), use Enterprise(+) license and then do an helm upgrade (Each node need a seperate license). + +## Installing the Chart + +### Add JFrog Helm repository + +Before installing JFrog helm charts, you need to add the [JFrog helm repository](https://charts.jfrog.io) to your helm client + +```bash +helm repo add jfrog https://charts.jfrog.io +helm repo update +``` + +**NOTE:** Passing masterKey is mandatory for fresh install of chart (7.x Appversion) + +### Create a unique Master Key +Artifactory HA cluster requires a unique master key. + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** + +You should generate a unique one and pass it to the template at install/upgrade time. 
+```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: + +```bash +helm upgrade --install artifactory-ha --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory-ha jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```bash +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions outputted by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade --namespace artifactory-ha jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +Arifactory 6.x to 7.x upgrade requires a one time migration process. This is done automatically on pod startup if needed. +It's possible to configure the migration timeout with the following configuration in extreme cases. The provided default should be more than enough for completion of the migration. +```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 +``` +* Note: If you are upgrading from 1.x to 4.x and above chart versions, please delete the existing statefulset of postgresql before upgrading the chart due to breaking changes in postgresql subchart. +```bash +kubectl delete statefulsets -postgresql +``` + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. 
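+If you prefer to keep these settings in a file instead of a long list of `--set` flags, the same keys shown in the example below can be written into a small values file and passed with `-f`. This is only a minimal sketch (the file name is arbitrary and the numbers are the same illustrative values used below), not a sizing recommendation:
+```bash
+# Write an illustrative values file with resource requests/limits for primary and member nodes
+cat > resources-values.yaml <<'EOF'
+artifactory:
+  primary:
+    resources:
+      requests:
+        cpu: "500m"
+        memory: "1Gi"
+      limits:
+        cpu: "2"
+        memory: "4Gi"
+  node:
+    resources:
+      requests:
+        cpu: "500m"
+        memory: "1Gi"
+      limits:
+        cpu: "2"
+        memory: "4Gi"
+EOF
+
+# Apply it on install/upgrade
+helm upgrade --install artifactory-ha -f resources-values.yaml --namespace artifactory-ha jfrog/artifactory-ha
+```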
+ +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). + +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory-ha jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Artifactory storage +Artifactory HA support a wide range of storage back ends. You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter. 
+This is used to set how many replicas of a binary should be stored in the cluster's nodes.
+Once this value is set on initial deployment, you cannot update it using helm.
+It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number.
+
+#### Existing volume claim
+
+###### Primary node
+In order to use an existing volume claim for the Artifactory primary storage, you need to:
+- Create a persistent volume claim by the name `volume--artifactory-ha-primary-0` e.g. `volume-myrelease-artifactory-ha-primary-0`
+- Pass a parameter to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.primary.persistence.existingClaim=true
+```
+
+###### Member nodes
+In order to use an existing volume claim for the Artifactory member nodes storage, you need to:
+- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount` by the names `volume--artifactory-ha-member-`, e.g. `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-member-1`.
+- Pass a parameter to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.node.persistence.existingClaim=true
+```
+
+#### Existing shared volume claim
+
+In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to:
+
+- Create PVCs with ReadWriteMany that match the naming conventions:
+```
+ {{ template "artifactory-ha.fullname" . }}-data-pvc-
+ {{ template "artifactory-ha.fullname" . }}-backup-pvc-
+```
+An example that shows 2 existing claims to be used:
+```
+ myexample-artifactory-ha-data-pvc-0
+ myexample-artifactory-ha-backup-pvc-0
+ myexample-artifactory-ha-data-pvc-1
+ myexample-artifactory-ha-backup-pvc-1
+```
+- Set `artifactory.persistence.fileSystem.existingSharedClaim.enabled` in values.yaml to true:
+```
+--set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true
+--set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2
+```
+
+#### NFS
+To use an NFS server as your cluster's storage, you need to:
+- Set up an NFS server. Get its IP as `NFS_IP`
+- Create `data` and `backup` directories on the NFS exported directory with write permissions to all
+- Pass NFS parameters to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.persistence.type=nfs \
+--set artifactory.persistence.nfs.ip=${NFS_IP} \
+...
+```
+
+#### Google Storage
+To use a Google Storage bucket as the cluster's filestore, see [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider).
+- Pass Google Storage parameters to `helm install` and `helm upgrade`
+```bash
+...
+--set artifactory.persistence.type=google-storage \
+--set artifactory.persistence.googleStorage.identity=${GCP_ID} \
+--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \
+...
+```
+In order to use a GCP service account, Artifactory needs a gcp.credentials.json file in the same directory as the binarystore.xml file.
+This can be generated by running:
+```bash
+gcloud iam service-accounts keys create --iam-account
+```
+This will produce the following, which can be saved to a file or copied into your `values.yaml`:
+```json
+{
+  "type": "service_account",
+  "project_id": "",
+  "private_key_id": "?????",
+  "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n",
+  "client_email": "???@j.iam.gserviceaccount.com",
+  "client_id": "???????",
+  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+  "token_uri": "https://oauth2.googleapis.com/token",
+  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+  "client_x509_cert_url": "https://www.googleapis.com/robot/v1....."
+}
+```
+
+One option is to create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` in a custom `values.yaml`:
+```bash
+# Create the Kubernetes secret from the file you created earlier.
+# IMPORTANT: The file must be called "gcp.credentials.json" because this is used later as the secret key!
+kubectl create secret generic artifactory-gcp-creds --from-file=./gcp.credentials.json
+```
+Set this secret in your custom `values.yaml`:
+```yaml
+artifactory:
+  persistence:
+    googleStorage:
+      gcpServiceAccount:
+        enabled: true
+        customSecretName: artifactory-gcp-creds
+```
+
+Another option is to put your generated config directly in your custom `values.yaml` and a secret will be created from it:
+```yaml
+artifactory:
+  persistence:
+    googleStorage:
+      gcpServiceAccount:
+        enabled: true
+        config: |
+          {
+            "type": "service_account",
+            "project_id": "",
+            "private_key_id": "?????",
+            "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n",
+            "client_email": "???@j.iam.gserviceaccount.com",
+            "client_id": "???????",
+            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+            "token_uri": "https://oauth2.googleapis.com/token",
+            "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+            "client_x509_cert_url": "https://www.googleapis.com/robot/v1....."
+          }
+```
+
+#### AWS S3
+**NOTE:** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level.
+In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running.
+This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section.
+
+To use an AWS S3 bucket as the cluster's filestore, see [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider).
+- Pass AWS S3 parameters to `helm install` and `helm upgrade`
+```bash
+...
+# With explicit credentials:
+--set artifactory.persistence.type=aws-s3 \
+--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \
+--set artifactory.persistence.awsS3.region=${AWS_REGION} \
+--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \
+--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \
+...
+
+...
+# Using an existing IAM role
+--set artifactory.persistence.type=aws-s3 \
+--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \
+--set artifactory.persistence.awsS3.region=${AWS_REGION} \
+--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \
+...
+```
+**NOTE:** Make sure S3 `endpoint` and `region` match.
See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, See [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine/s that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +To enable [Direct Cloud Storage Download](https://www.jfrog.com/confluence/display/JFROG/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-1.ConfiguretheArtifactoryFilestore) +```bash +... +--set artifactory.persistence.awsS3V3.enableSignedUrlRedirect=true \ +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore --namespace artifactory-ha jfrog/artifactory-ha +``` + +### Create a unique Master Key + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.masterKeySecretName=my-secret --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory-ha --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory-ha jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. 
The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.joinKeySecretName=my-secret --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged.. + +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. Switching between them while a cluster is running might disable your Artifactory HA cluster! + +##### Artifactory UI +Once primary cluster is running, open Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license is separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node. + +##### REST API +You can add licenses via REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses. + +##### Kubernetes Secret +You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**! +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-cluster-license --from-file=./art.lic + +# Pass the license to helm +helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + + + + + + + +``` + +```bash +helm upgrade --install artifactory-ha -f values.yaml --namespace artifactory-ha jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. 
+If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. the binarstore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup, +which means that changes you make over time to the `binaryStoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +2. Any custom configuration file you have to configure artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied which is to allow everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory primary and member pods. + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgresql + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory-ha +``` + +### Artifactory JMX Configuration +** You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --namespace artifactory-ha jfrog/artifactory-ha +``` +This will enable access to Artifactory with JMX on the default port (9010). 
+** You have the option to change the port by setting ```artifactory.primary.javaOpts.jmx.port``` and ```artifactory.node.javaOpts.jmx.port```
+to your choice of port.
+
+In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow these steps:
+1. Enable JMX as described above and change the Artifactory service to be of type LoadBalancer:
+```bash
+helm upgrade --install artifactory \
+  --set artifactory.primary.javaOpts.jmx.enabled=true \
+  --set artifactory.node.javaOpts.jmx.enabled=true \
+  --set artifactory.service.type=LoadBalancer \
+  --namespace artifactory-ha jfrog/artifactory-ha
+```
+2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with
+```artifactory.primary.javaOpts.jmx.host``` and ```artifactory.node.javaOpts.jmx.host```), so in order to connect to Artifactory
+with jconsole you should map the Artifactory Kubernetes service IP to the service name using your hosts file as such:
+```
+ artifactory-ha--primary
+
+```
+3. Launch jconsole with the service address and port:
+```bash
+jconsole artifactory-ha--primary:
+jconsole :
+```
+
+### Bootstrapping Artifactory admin password
+You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide.
+
+1. Create `admin-creds-values.yaml` and provide the IP (by default 127.0.0.1) and password:
+```yaml
+artifactory:
+  admin:
+    ip: "" # Example: "*" to allow access from anywhere
+    username: "admin"
+    password: ""
+```
+
+2. Apply the `admin-creds-values.yaml` file:
+```bash
+helm upgrade --install artifactory --namespace artifactory-ha jfrog/artifactory-ha -f admin-creds-values.yaml
+```
+
+### Bootstrapping Artifactory configuration
+**IMPORTANT:** Bootstrapping Artifactory needs a license. Pass the license as shown in the section above.
+
+* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration)
+* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration)
+
+1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: my-release-bootstrap-config
+data:
+  artifactory.config.import.xml: |
+
+  security.import.xml: |
+
+```
+
+2. Create the configMap in Kubernetes:
+```bash
+kubectl apply -f bootstrap-config.yaml
+```
+3. Pass the configMap to helm:
+```bash
+helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+### Use custom nginx.conf with Nginx
+
+Steps to create a configMap with `nginx.conf`:
+* Create `nginx.conf` file.
+```bash
+kubectl create configmap nginx-config --from-file=nginx.conf
+```
+* Pass the configMap to `helm install`
+```bash
+helm upgrade --install artifactory-ha --set nginx.customConfigMap=nginx-config --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+### Scaling your Artifactory cluster
+A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and, if needed, resize it.
+
+##### Before scaling
+**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto-generated one (this is the default with the enclosed PostgreSQL helm chart).
+
+Get the current database password:
+```bash
+export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode)
+```
+Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster!
+
+##### Scale up
+Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes).
+```bash
+# Scale to 4 nodes (1 primary and 3 member nodes)
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha jfrog/artifactory-ha
+```
+
+##### Scale down
+Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes.
+
+```bash
+# Scale down to 2 member nodes
+helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha jfrog/artifactory-ha
+```
+- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. You need to explicitly remove it:
+```bash
+# List PVCs
+kubectl get pvc
+
+# Remove the PVC with the highest ordinal!
+# In this example, the highest node ordinal was 2, so we need to remove its storage.
+kubectl delete pvc volume-artifactory-node-2
+```
+
+### Use an external Database
+
+**For production grade installations it is recommended to use an external PostgreSQL with a static password**
+
+#### PostgreSQL
+There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case, you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name.
+
+This can be done with the following parameters:
+```bash
+...
+--set postgresql.enabled=false \
+--set database.type=postgresql \
+--set database.driver=org.postgresql.Driver \
+--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \
+--set database.user=${DB_USER} \
+--set database.password=${DB_PASSWORD} \
+...
+```
+**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored!
+
+#### Other DB type
+There are cases where you will want to use a different database and not the enclosed **PostgreSQL**.
+See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database)
+> The official Artifactory Docker images include the PostgreSQL database driver.
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib
+
+This can be done with the following parameters:
+```bash
+# Make sure your Artifactory Docker image has the MySQL database driver in it
+...
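+# For illustration only: database.url below expects a standard JDBC URL.
+# A hypothetical MySQL example (host, port and database name are placeholders, not values from this chart):
+# export DB_URL='jdbc:mysql://mysql.example.com:3306/artifactory'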
+--set postgresql.enabled=false \ +--set artifactory.preStartCommand="mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! +##### Configuring Artifactory with external Oracle database +To use artifactory with oracledb the required instant client library files, libaio has to be copied to tomcat lib. Also set LD_LIBRARY_PATH env variable. +1. Create a value file with the configuration +```yaml +postgresql: + enabled: false +database: + type: oracle + driver: oracle.jdbc.OracleDriver + url: + user: + password: +artifactory: + preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O instantclient-basic-linux.x64-19.6.0.0.0dbru.zip https://download.oracle.com/otn_software/linux/instantclient/19600/instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && unzip -jn instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && wget -O libaio1_0.3.110-3_amd64.deb http://ftp.br.debian.org/debian/pool/main/liba/libaio/libaio1_0.3.110-3_amd64.deb && dpkg-deb -x libaio1_0.3.110-3_amd64.deb . && cp lib/x86_64-linux-gnu/* ." + extraEnvironmentVariables: + - name: LD_LIBRARY_PATH + value: /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib +``` +2. Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory jfrog/artifactory-ha --namespace artifactory -f values-oracle.yaml +``` +**NOTE:** If its an upgrade from 6.x to 7.x, add same `preStartCommand` under `artifactory.migration.preStartCommand` + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +... +``` + +### Deleting Artifactory +To delete the Artifactory HA cluster + +On helm v2: +```bash +helm delete --purge artifactory-ha +``` + +On helm v3: +```bash +helm delete artifactory-ha --namespace artifactory-ha +``` + +This will completely delete your Artifactory HA cluster. +**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes. 
You need to explicitly remove them +```bash +kubectl delete pvc -l release=artifactory-ha +``` +See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/) + +### Custom Docker registry for your images +If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a +[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm +```bash +# Create a Docker registry secret called 'regsecret' +kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL} +``` +Once created, you pass it to `helm` +```bash +helm upgrade --install artifactory-ha --set imagePullSecrets=regsecret --namespace artifactory-ha jfrog/artifactory-ha +``` + +### Logger sidecars +This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml) + +Get list of containers in the pod +```bash +kubectl get pods -n -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n' +``` + +View specific log +```bash +kubectl logs -n -c +``` + + +### Custom init containers +There are cases where a special, unsupported init processes is needed like checking something on the file system or testing something before spinning up the main container. + +For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml) . By default it's commented out +```yaml +artifactory: + ## Add custom init containers executed before predefined init containers + customInitContainersBegin: | + ## Init containers template goes here ## + ## Add custom init containers executed after predefined init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed. For example monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Custom secrets +If you need to add a custom secret in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom secrets in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + # Add custom secrets - secret per file + customSecrets: + - name: custom-secret + key: custom-secret.yaml + data: > + secret data +``` + +To use a custom secret, need to define a custom volume. 
+```yaml +artifactory: + ## Add custom volumes + customVolumes: | + - name: custom-secret + secret: + secretName: custom-secret +``` + +To use a volume, need to define a volume mount as part of a custom init or sidecar container. +```yaml +artifactory: + customSidecarContainers: + - name: side-car-container + volumeMounts: + - name: custom-secret + mountPath: /opt/custom-secret.yaml + subPath: custom-secret.yaml + readOnly: true +``` + +### Add Artifactory User Plugin during installation +If you need to add [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. + +Create a secret with [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) by following command: +```bash +# Secret with single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha + +# Secret with single user plugin with configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha +``` + +Add plugin secret names to `plugins.yaml` as following: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory-ha -f plugins.yaml --namespace artifactory-ha jfrog/artifactory-ha +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory-ha: # Name of the artifactory-ha dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +For additional information, please refer [here](https://www.jfrog.com/confluence/display/JFROG/User+Plugins). + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option. 
+ +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f configmaps.yaml --namespace artifactory-ha jfrog/artifactory-ha +``` + +This will, in turn: +* create a configMap with the files you specified above +* create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + +### Establishing TLS and Adding certificates +In HTTPS, the communication protocol is encrypted using Transport Layer Security (TLS). By default, TLS between JFrog Platform nodes is disabled. +When TLS is enabled, JFrog Access acts as the Certificate Authority (CA) signs the TLS certificates used by all the different JFrog Platform nodes. + +To establish TLS between JFrog Platform nodes: +Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. For more info, Please refer [here](https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates) + +To enable tls in charts, set `tls` to true under `access` in [values.yaml](values.yaml). By default it's false +```yaml +access: + accessConfig: + security: + tls: true +``` + +To add custom tls certificates, create a tls secret from the certificate files. + +```bash +kubectl create secret tls --cert=ca.crt --key=ca.private.key +``` + +For resetting access certificates , you can set `resetAccessCAKeys` to true under access section in [values.yaml](values.yaml) and perform an helm upgrade. +* Note : Once helm upgrade is done, set `resetAccessCAKeys` to false for subsequent upgrades (to avoid resetting access certificates on every helm upgrade) +```yaml +access: + accessConfig: + security: + tls: true + customCertificatesSecretName: + resetAccessCAKeys: true +``` + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. + +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f filebeat.yaml --namespace artifactory-ha jfrog/artifactory +``` + +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml` + +### Install Artifactory HA with Nginx and Terminate SSL in Nginx Service(LoadBalancer). 
+To install the helm chart and perform SSL offload in the LoadBalancer layer of Nginx (for example, using AWS ACM certificates to do SSL offload in the load balancer layer), simply add the following to an `artifactory-ssl-values.yaml` file:
+```yaml
+nginx:
+  https:
+    enabled: false
+  service:
+    ssloffload: true
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx"
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm upgrade --install artifactory -f artifactory-ssl-values.yaml --namespace artifactory jfrog/artifactory
+```
+
+### Ingress and TLS
+To get Helm to create an ingress object with a hostname, add the lines below to an `artifactory-ingress-values.yaml` file:
+```yaml
+ingress:
+  enabled: true
+  hosts:
+    - artifactory.company.com
+artifactory:
+  service:
+    type: NodePort
+nginx:
+  enabled: false
+```
+
+and use it with your helm install/upgrade:
+```bash
+helm upgrade --install artifactory -f artifactory-ingress-values.yaml --namespace artifactory jfrog/artifactory
+```
+
+If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism.
+
+To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace:
+
+```bash
+kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key
+```
+
+Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file:
+
+```yaml
+ingress:
+  ## If true, Artifactory Ingress will be created
+  ##
+  enabled: true
+
+  ## Artifactory Ingress hostnames
+  ## Must be provided if Ingress is enabled
+  ##
+  hosts:
+    - artifactory.domain.com
+  annotations:
+    kubernetes.io/tls-acme: "true"
+  ## Artifactory Ingress TLS configuration
+  ## Secrets must be manually created in the namespace
+  ##
+  tls:
+    - secretName: artifactory-tls
+      hosts:
+        - artifactory.domain.com
+```
+
+### Ingress annotations
+
+This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup.
+
+```yaml
+ingress:
+  enabled: true
+  defaultBackend:
+    enabled: false
+  hosts:
+    - myhost.example.com
+  annotations:
+    ingress.kubernetes.io/force-ssl-redirect: "true"
+    ingress.kubernetes.io/proxy-body-size: "0"
+    ingress.kubernetes.io/proxy-read-timeout: "600"
+    ingress.kubernetes.io/proxy-send-timeout: "600"
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token;
+      rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3;
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+  tls:
+    - hosts:
+        - "myhost.example.com"
+```
+
+### Ingress additional rules
+
+You have the option to add additional ingress rules to the Artifactory ingress. An example for this use case can be routing the /xray path to Xray.
+In order to do that, simply add the following to an `artifactory-ha-values.yaml` file: +```yaml +ingress: + enabled: true + + defaultBackend: + enabled: false + + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite "(?i)/xray(/|$)(.*)" /$2 break; + + additionalRules: | + - host: + http: + paths: + - path: / + backend: + serviceName: + servicePort: + - path: /xray + backend: + serviceName: + servicePort: + - path: /artifactory + backend: + serviceName: {{ template "artifactory.nginx.fullname" . }} + servicePort: {{ .Values.nginx.externalPortHttp }} +``` + +and running: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha jfrog/artifactory-ha -f artifactory-ha-values.yaml +``` + +### Dedicated Ingress object for replicator service + +You have the option to add an additional ingress object to the Replicator service. An example use case is routing the `/replicator/` path to Artifactory. +In order to do that, simply add the following to an `artifactory-values.yaml` file: + +```yaml +artifactory: + replicator: + enabled: true + ingress: + name: + hosts: + - myhost.example.com + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-buffering: "off" + nginx.ingress.kubernetes.io/configuration-snippet: | + chunked_transfer_encoding on; + tls: + - hosts: + - "myhost.example.com" + secretName: +``` + +### Ingress behind another load balancer +If you are running a load balancer that is used to offload TLS in front of the Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable the **'use-forwarded-headers=true'** option. Otherwise, nginx will fill those headers with the request information it receives from the external load balancer. + +To enable it with `helm install`: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx --set-string controller.config.use-forwarded-headers=true +``` +or `helm upgrade`: +```bash +helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true center/kubernetes-ingress-nginx/ingress-nginx +``` +or create a values.yaml file with the following content: +```yaml +controller: + config: + use-forwarded-headers: "true" +``` +Then install nginx-ingress with the values file you created: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx -f values.yaml +``` + +### Log Analytics + +#### FluentD, Prometheus and Grafana + +To configure Prometheus and Grafana to gather metrics from Artifactory through the use of FluentD, please refer to the log analytics repo: + +https://github.com/jfrog/log-analytics-prometheus + +That repo contains a file `artifactory-ha-values.yaml` that can be used to deploy Prometheus, Service Monitor, and Grafana with this chart, as shown in the example below.
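+A minimal sketch of how that values file can be combined with this chart (assuming you have downloaded `artifactory-ha-values.yaml` from the log-analytics-prometheus repo into your working directory; the release and namespace names are only illustrative):
+```bash
+# Apply the log-analytics values on top of any other overrides you already use
+helm upgrade --install artifactory-ha jfrog/artifactory-ha \
+  --namespace artifactory-ha \
+  -f artifactory-ha-values.yaml
+```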
+ + +## Useful links +- https://www.jfrog.com/confluence/display/EP/Getting+Started +- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory +- https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ReverseProxyConfiguration.md b/charts/artifactory-ha/artifactory-ha/4.13.000/ReverseProxyConfiguration.md new file mode 100644 index 000000000..851593236 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ReverseProxyConfiguration.md @@ -0,0 +1,140 @@ +# JFrog Artifactory Reverse Proxy Settings using Nginx + +#### Reverse Proxy +* To use Artifactory as a Docker registry, it's mandatory to use a reverse proxy. +* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate the required configuration snippet, which you can then download and install directly in the corresponding directory of your reverse proxy server. +* To learn about configuring NGINX or Apache for reverse proxy, refer to the documentation provided on the [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy) +* By default, the Artifactory helm chart uses Nginx for reverse proxy and load balancing. + +**Note**: The Nginx image distributed with the Artifactory helm chart is a custom image managed and maintained by JFrog. + +#### Features of Artifactory Nginx +* Provides a default configuration with a self-signed SSL certificate generated on each helm install/upgrade. +* Persists configuration and SSL certificates in the `/var/opt/jfrog/nginx` directory + +#### Changing the default Artifactory nginx conf +Use a values.yaml file to change the value of `nginx.mainConf` or `nginx.artifactoryConf`. +These configurations will be mounted into the nginx container using a configMap. +For example: +1. Create a values file `nginx-values.yaml` with the following values: +```yaml +nginx: + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen {{ .Values.nginx.internalPortHttps }} ssl; + listen {{ .Values.nginx.internalPortHttp }} ; + ## Change to the DNS name you use to access Artifactory + server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } +``` + +2. Install/upgrade artifactory: +```bash +helm upgrade --install artifactory-ha jfrog/artifactory-ha -f nginx-values.yaml +``` + + +#### Steps to use static configuration for reverse proxy in nginx +1. Get the Artifactory service name using this command: `kubectl get svc -n $NAMESPACE` + +2. Create an `artifactory.conf` file with the nginx configuration. See more [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d). + + The following is an example `artifactory.conf`: + + **Note**: + * Create the file with the name `artifactory.conf`, as the configMap key is fixed. + * Replace `artifactory-artifactory` with the service name taken from step 1. + + ```bash + ## add ssl entries when https has been set in config + ssl_certificate /var/opt/jfrog/nginx/ssl/tls.crt; + ssl_certificate_key /var/opt/jfrog/nginx/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen 443 ssl; + listen 80; + ## Change to the DNS name you use to access Artifactory + server_name ~(?<repo>.+)\.artifactory-artifactory artifactory-artifactory; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://artifactory-artifactory:8081/artifactory/$1; + } + proxy_pass http://artifactory-artifactory:8081/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + ``` + +3. Create a configMap from the `artifactory.conf` created in the step above. + ```bash + kubectl create configmap art-nginx-conf --from-file=artifactory.conf + ``` +4. Deploy Artifactory using the helm chart.
+ You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml). + + The following is a command to set the value at runtime: + ```bash + helm install --name artifactory-ha --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory-ha + ``` \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/UPGRADE_NOTES.md b/charts/artifactory-ha/artifactory-ha/4.13.000/UPGRADE_NOTES.md new file mode 100644 index 000000000..b3326dccc --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/UPGRADE_NOTES.md @@ -0,0 +1,42 @@ +# JFrog Artifactory Chart Upgrade Notes +This file describes special upgrade notes needed at specific versions. + +## Upgrade from 1.X to 2.X and above (Chart Versions) + +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* To upgrade from a version prior to 1.x, you first need to upgrade to the latest version of 1.x as described in https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md. +* Note: If you are upgrading from 1.x to 4.x and above chart versions, please delete the existing statefulset of postgresql before upgrading the chart due to breaking changes in the postgresql subchart. +```bash +kubectl delete statefulsets <release-name>-postgresql +``` + +## Upgrade from 0.X to 1.X (Chart Versions) +**DOWNTIME IS REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shut down) + a. Scale down the cluster to primary node only (`node.replicaCount=0`) so the exported db and configuration will be kept on one known node (the primary) + b. If your Artifactory HA K8s service is set to member nodes only (`service.pool=members`) you will need to access the primary node directly (use `kubectl port-forward`) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as Artifactory filestore will persist across upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. Old PostgreSQL will be removed and a new one deployed + a. You must pass an explicit "ready for upgrade" flag with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4. 
Once ready, open Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry you can't see the old config and files. It will all restore with the system import in the next step + 5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6. Restore access to Artifactory + a. Scale the cluster member nodes back to the original size + * Artifactory should now be ready to get back to normal operation diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/app-readme.md b/charts/artifactory-ha/artifactory-ha/4.13.000/app-readme.md new file mode 100644 index 000000000..a5aa5fd47 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/app-readme.md @@ -0,0 +1,16 @@ +# JFrog Artifactory High Availability Helm Chart + +Universal Repository Manager supporting all major packaging formats, build tools and CI servers. + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server(optional) + +## Useful links +Blog: [Herd Trust Into Your Rancher Labs Multi-Cloud Strategy with Artifactory](https://jfrog.com/blog/herd-trust-into-your-rancher-labs-multi-cloud-strategy-with-artifactory/) + +## Activate Your Artifactory Instance +Don't have a license? Please send an email to [rancher-jfrog-licenses@jfrog.com](mailto:rancher-jfrog-licenses@jfrog.com) to get it. diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/.helmignore b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/Chart.yaml new file mode 100644 index 000000000..2f858e60e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.3.4 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/README.md new file mode 100644 index 000000000..319291bc6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/README.md @@ -0,0 +1,680 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. 
| `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node 
labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. |`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
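+As a hedged illustration, an immutable tag can be pinned through the image values (the tag shown is only an assumption based on this chart's `appVersion`; check the registry for the exact immutable tag you want to pin):
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  # Pin a fully qualified, immutable tag rather than a rolling tag such as "11" or "latest"
+  tag: 11.9.0
+```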
+ +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. 
Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. 
See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. 
The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 9.0.0 + +In this version the chart was adapted to follow the Helm label best practices, see [PR 3021](https://github.com/bitnami/charts/pull/3021). That means the backward compatibility is not guarantee when upgrading the chart to this major version. + +As a workaround, you can delete the existing statefulset (using the `--cascade=false` flag pods are not deleted) before upgrade the chart. 
For example, this can be a valid workflow: + +- Deploy an old version (8.X.X) +```console +$ helm install postgresql bitnami/postgresql --version 8.10.14 +``` + +- Old version is up and running +```console +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 1 2020-08-04 13:39:54.783480286 +0000 UTC deployed postgresql-8.10.14 11.8.0 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 76s +``` + +- The upgrade to the latest one (9.X.X) is going to fail +```console +$ helm upgrade postgresql bitnami/postgresql +Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden +``` + +- Delete the statefulset +```console +$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql +statefulset.apps "postgresql-postgresql" deleted +``` + +- Now the upgrade works +```cosnole +$ helm upgrade postgresql bitnami/postgresql +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 3 2020-08-04 13:42:08.020385884 +0000 UTC deployed postgresql-9.1.2 11.8.0 +``` + +- We can kill the existing pod and the new statefulset is going to create a new one: +```console +$ kubectl delete pod postgresql-postgresql-0 +pod "postgresql-postgresql-0" deleted + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 19s +``` + +Please, note that without the `--cascade=false` both objects (statefulset and pod) are going to be removed and both objects will be deployed again with the `helm upgrade` command + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). 
+ +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinty` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running:
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version
+
+```console
+$ helm repo update
+$ helm install my-release bitnami/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`. To do so, connect to the previous PostgreSQL chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the previous chart's password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local PostgreSQL, so the password should be the new one (you can find it in the NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below:
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/.helmignore b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 000000000..e6bac7873 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 0.6.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself.
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.6.2 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/README.md new file mode 100644 index 000000000..e04391a3f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/README.md @@ -0,0 +1,274 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +**Names** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +**TplValues** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. 
| `.` Chart context | + +**Validations** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | When a chart is using `bitnami/mariadb` as subchart you should use this to validate required password are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "context" $` | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +**Errors** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +**Utils** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | + +**Secrets** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. 
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+**Example of use**
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+**NOTES.txt**
+
+```
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode)
+
+ 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode)
+```
+
+## Notable changes
+
+N/A
diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c0ea2c70c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress.
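+Kubernetes 1.14 and newer serve Ingress from networking.k8s.io/v1beta1; older clusters only expose extensions/v1beta1, hence the version check below.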
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_errors.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..d6d3ec65a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_images.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 000000000..aafde9f3b --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_labels.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_names.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 000000000..adf2a74f4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..d6165a294 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. 
The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_storage.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_utils.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..7d02f2ef6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_validations.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100644 index 000000000..62635b30e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $valueKeyArray := splitList "." .valueKey -}} + {{- $value := "" -}} + {{- $latestObj := $.context.Values -}} + {{- range $valueKeyArray -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.valueKey | fail -}} + {{- end -}} + + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} + {{- end -}} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . 
-}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a mariadb required password must not be empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "context" $) }} + +Validate value params: + - secret - String - Required. Name of the secret where mysql values are stored, e.g: "mysql-passwords-secret" +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- if and (not .context.Values.mariadb.existingSecret) .context.Values.mariadb.enabled -}} + {{- $requiredPasswords := list -}} + + {{- if .context.Values.mariadb.secret.requirePasswords -}} + {{- $requiredRootMariadbPassword := dict "valueKey" "mariadb.rootUser.password" "secret" .secretName "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootMariadbPassword -}} + + {{- if not (empty .context.Values.mariadb.db.user) -}} + {{- $requiredMariadbPassword := dict "valueKey" "mariadb.db.password" "secret" .secretName "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredMariadbPassword -}} + {{- end -}} + + {{- if .context.Values.mariadb.replication.enabled -}} + {{- $requiredReplicationPassword := dict "valueKey" "mariadb.replication.password" "secret" .secretName "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a postgresql required password must not be empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . 
-}} + + {{- if and (not $existingSecret) $enabled -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if $enabledReplication -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.enabled | quote -}} + {{- else -}} + true + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.replication.enabled | quote -}} + {{- else -}} + {{- .context.Values.replication.enabled | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. 
+ +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/values.yaml new file mode 100644 index 000000000..9ecdc93f5 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/commonAnnotations.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 000000000..f6977823c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@ +commonAnnotations: + helm.sh/hook: 'pre-install, pre-upgrade' + helm.sh/hook-weight: '-1' diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/shmvolume-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 000000000..347d3b40a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/README.md new file mode 100644 index 000000000..1813a2fea --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/conf.d/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/conf.d/README.md new file mode 100644 index 000000000..184c1875d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 000000000..cba38091e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.lock b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.lock new file mode 100644 index 000000000..72e1642e2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.6.2 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-09-01T17:40:02.795096189Z" diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.yaml new file mode 100644 index 000000000..2c28bfe14 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/NOTES.txt b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/NOTES.txt new file mode 100644 index 000000000..596e969ce --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
+{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 000000000..68cd0dc0e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,501 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. +*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. 
+*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/configmap.yaml new file mode 100644 index 000000000..b29ef6040 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/extended-config-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 000000000..f21a97654 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/initialization-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 000000000..6637867a3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 000000000..6b7a3171e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-svc.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 000000000..b993c9971 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 000000000..2a7b372fe --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/podsecuritypolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..da0b3ab11 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/prometheusrule.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 000000000..b0c41b1a4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/role.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/role.yaml new file mode 100644 index 000000000..6d3cf50a4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/rolebinding.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 000000000..f7837388d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/secrets.yaml new file mode 100644 index 000000000..c93dbe0bd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 000000000..17f7ff399 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/servicemonitor.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 000000000..d57b7fb48 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset-slaves.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 000000000..54d24099f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,345 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 000000000..0e6eefebb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,514 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . 
}}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . 
}} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-headless.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 000000000..49131578a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-read.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 000000000..885c7bb04 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: slave +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc.yaml new file mode 100644 index 000000000..e9fc50456 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values-production.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values-production.yaml new file mode 100644 index 000000000..c08014549 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values-production.yaml @@ -0,0 +1,594 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
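# Regarding the volumePermissions "auto" note above: the comment suggests combining
# three settings that live in different parts of this values file. An illustrative
# sketch only (not part of the upstream chart defaults; volumePermissions.enabled=true
# is assumed here so the init container actually runs):
# volumePermissions:
#   enabled: true
#   securityContext:
#     runAsUser: "auto"
# securityContext:
#   enabled: false
# shmVolume:
#   chmod:
#     enabled: false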
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'on' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + 
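# Note on the `service: {}` overrides (master above, slave further below): the
# svc.yaml and svc-read.yaml templates earlier in this diff resolve each field with
# `coalesce`, so a role-specific value takes precedence over the shared `service`
# block. A minimal sketch, with hypothetical values:
# service:
#   type: ClusterIP
# slave:
#   service:
#     type: LoadBalancer
#     loadBalancerIP: 203.0.113.10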
## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.schema.json b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.schema.json new file mode 100644 index 000000000..7b5e2efc3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": 
"Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.yaml new file mode 100644 index 000000000..f45c4183d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/charts/postgresql/values.yaml @@ -0,0 +1,600 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + extraInitContainers: | + # - name: do-something + # image: busybox + # 
command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ci/access-tls-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/access-tls-values.yaml new file mode 100644 index 000000000..b82db72d7 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/access-tls-values.yaml @@ -0,0 +1,11 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +access: + accessConfig: + security: + tls: true + resetAccessCAKeys: true diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/default-values.yaml new file mode 100644 index 000000000..1ebf93823 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/default-values.yaml @@ -0,0 +1,9 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
+databaseUpgradeReady: true +## This is an exception here because HA needs masterKey to connect with other node members and it is commented in values to support 6.x to 7.x Migration +## Please refer https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#special-upgrade-notes-1 +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ci/global-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/global-values.yaml new file mode 100644 index 000000000..e6fb0c283 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/global-values.yaml @@ -0,0 +1,47 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +global: + versions: + artifactory: 7.12.8 + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + customInitContainers: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: volume + # Add custom volumes + customVolumes: | + - name: custom-script + emptyDir: + sizeLimit: 100Mi + # Add custom volumesMounts + customVolumeMounts: | + - name: custom-script + mountPath: "/scripts" + # Add custom sidecar containers + customSidecarContainers: | + - name: "sidecar-list-etc" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + securityContext: + allowPrivilegeEscalation: false + command: ["sh","-c","echo 'Sidecar is running' >> /scripts/sidecar.txt; cat /scripts/sidecar.txt; while true; do sleep 30; done"] + volumeMounts: + - mountPath: "/scripts" + name: custom-script + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ci/migration-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/migration-disabled-values.yaml new file mode 100644 index 000000000..6c1e2587f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/migration-disabled-values.yaml @@ -0,0 +1,8 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + migration: + enabled: false diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/ci/test-values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/test-values.yaml new file mode 100644 index 000000000..efe3241d8 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/ci/test-values.yaml @@ -0,0 +1,12 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + persistence: + enabled: true + +postgresql: + postgresqlPassword: "password" + postgresqlExtendedConf: + maxConnections: "102" + persistence: + enabled: true diff 
--git a/charts/artifactory-ha/artifactory-ha/4.13.000/files/migrate.sh b/charts/artifactory-ha/artifactory-ha/4.13.000/files/migrate.sh new file mode 100644 index 000000000..e35bfdbb2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/files/migrate.sh @@ -0,0 +1,4339 @@ +#!/bin/bash + +# Flags +FLAG_Y="y" +FLAG_N="n" +FLAGS_Y_N="$FLAG_Y $FLAG_N" +FLAG_NOT_APPLICABLE="_NA_" + +CURRENT_VERSION=$1 + +WRAPPER_SCRIPT_TYPE_RPMDEB="RPMDEB" +WRAPPER_SCRIPT_TYPE_DOCKER_COMPOSE="DOCKERCOMPOSE" + +SENSITIVE_KEY_VALUE="__sensitive_key_hidden___" + +# Shared system keys +SYS_KEY_SHARED_JFROGURL="shared.jfrogUrl" +SYS_KEY_SHARED_SECURITY_JOINKEY="shared.security.joinKey" +SYS_KEY_SHARED_SECURITY_MASTERKEY="shared.security.masterKey" + +SYS_KEY_SHARED_NODE_ID="shared.node.id" +SYS_KEY_SHARED_JAVAHOME="shared.javaHome" + +SYS_KEY_SHARED_DATABASE_TYPE="shared.database.type" +SYS_KEY_SHARED_DATABASE_TYPE_VALUE_POSTGRES="postgresql" +SYS_KEY_SHARED_DATABASE_DRIVER="shared.database.driver" +SYS_KEY_SHARED_DATABASE_URL="shared.database.url" +SYS_KEY_SHARED_DATABASE_USERNAME="shared.database.username" +SYS_KEY_SHARED_DATABASE_PASSWORD="shared.database.password" + +SYS_KEY_SHARED_ELASTICSEARCH_URL="shared.elasticsearch.url" +SYS_KEY_SHARED_ELASTICSEARCH_USERNAME="shared.elasticsearch.username" +SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD="shared.elasticsearch.password" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP="shared.elasticsearch.clusterSetup" +SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE="shared.elasticsearch.unicastFile" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP_VALUE="YES" + +# Define this in product specific script. Should contain the path to unitcast file +# File used by insight server to write cluster active nodes info. This will be read by elasticsearch +#SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE_VALUE="" + +SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME="shared.rabbitMq.active.node.name" +SYS_KEY_RABBITMQ_ACTIVE_NODE_IP="shared.rabbitMq.active.node.ip" + +# Filenames +FILE_NAME_SYSTEM_YAML="system.yaml" +FILE_NAME_JOIN_KEY="join.key" +FILE_NAME_MASTER_KEY="master.key" +FILE_NAME_INSTALLER_YAML="installer.yaml" + +# Global constants used in business logic +NODE_TYPE_STANDALONE="standalone" +NODE_TYPE_CLUSTER_NODE="node" +NODE_TYPE_DATABASE="database" + +# External(isable) databases +DATABASE_POSTGRES="POSTGRES" +DATABASE_ELASTICSEARCH="ELASTICSEARCH" +DATABASE_RABBITMQ="RABBITMQ" + +POSTGRES_LABEL="PostgreSQL" +ELASTICSEARCH_LABEL="Elasticsearch" +RABBITMQ_LABEL="Rabbitmq" + +ARTIFACTORY_LABEL="Artifactory" +JFMC_LABEL="Mission Control" +DISTRIBUTION_LABEL="Distribution" +XRAY_LABEL="Xray" + +POSTGRES_CONTAINER="postgres" +ELASTICSEARCH_CONTAINER="elasticsearch" +RABBITMQ_CONTAINER="rabbitmq" +REDIS_CONTAINER="redis" + +#Adding a small timeout before a read ensures it is positioned correctly in the screen +read_timeout=0.5 + +# Options related to data directory location +PROMPT_DATA_DIR_LOCATION="Installation Directory" +KEY_DATA_DIR_LOCATION="installer.data_dir" + +SYS_KEY_SHARED_NODE_HAENABLED="shared.node.haEnabled" +PROMPT_ADD_TO_CLUSTER="Are you adding an additional node to an existing product cluster?" 
+KEY_ADD_TO_CLUSTER="installer.ha" +VALID_VALUES_ADD_TO_CLUSTER="$FLAGS_Y_N" + +MESSAGE_POSTGRES_INSTALL="The installer can install a $POSTGRES_LABEL database, or you can connect to an existing compatible $POSTGRES_LABEL database\n(compatible databases: https://www.jfrog.com/confluence/display/JFROG/System+Requirements#SystemRequirements-RequirementsMatrix)" +PROMPT_POSTGRES_INSTALL="Do you want to install $POSTGRES_LABEL?" +KEY_POSTGRES_INSTALL="installer.install_postgresql" +VALID_VALUES_POSTGRES_INSTALL="$FLAGS_Y_N" + +# Postgres connection details +RPM_DEB_POSTGRES_HOME_DEFAULT="/var/opt/jfrog/postgres" +RPM_DEB_MESSAGE_STANDALONE_POSTGRES_DATA="$POSTGRES_LABEL home will have data and its configuration" +RPM_DEB_PROMPT_STANDALONE_POSTGRES_DATA="Type desired $POSTGRES_LABEL home location" +RPM_DEB_KEY_STANDALONE_POSTGRES_DATA="installer.postgresql.home" + +MESSAGE_DATABASE_URL="Provide the database connection details" +PROMPT_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://:/artifactory" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://:/mission_control?sslmode=disable" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://:/distribution?sslmode=disable" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://:/xraydb?sslmode=disable" + ;; + esac + if [ -z "$databaseURlExample" ]; then + echo -n "$POSTGRES_LABEL URL" # For consistency with username and password + return + fi + echo -n "$POSTGRES_LABEL url. Example: [$databaseURlExample]" +} +REGEX_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://.*/artifactory.*" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://.*/mission_control.*" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://.*/distribution.*" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://.*/xraydb.*" + ;; + esac + echo -n "^$databaseURlExample\$" +} +ERROR_MESSAGE_DATABASE_URL="Invalid $POSTGRES_LABEL URL" +KEY_DATABASE_URL="$SYS_KEY_SHARED_DATABASE_URL" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_USERNAME="$POSTGRES_LABEL username" +KEY_DATABASE_USERNAME="$SYS_KEY_SHARED_DATABASE_USERNAME" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_PASSWORD="$POSTGRES_LABEL password" +KEY_DATABASE_PASSWORD="$SYS_KEY_SHARED_DATABASE_PASSWORD" +IS_SENSITIVE_DATABASE_PASSWORD="$FLAG_Y" + +MESSAGE_STANDALONE_ELASTICSEARCH_INSTALL="The installer can install a $ELASTICSEARCH_LABEL database or you can connect to an existing compatible $ELASTICSEARCH_LABEL database" +PROMPT_STANDALONE_ELASTICSEARCH_INSTALL="Do you want to install $ELASTICSEARCH_LABEL?" 
+KEY_STANDALONE_ELASTICSEARCH_INSTALL="installer.install_elasticsearch" +VALID_VALUES_STANDALONE_ELASTICSEARCH_INSTALL="$FLAGS_Y_N" + +# Elasticsearch connection details +MESSAGE_ELASTICSEARCH_DETAILS="Provide the $ELASTICSEARCH_LABEL connection details" +PROMPT_ELASTICSEARCH_URL="$ELASTICSEARCH_LABEL URL" +KEY_ELASTICSEARCH_URL="$SYS_KEY_SHARED_ELASTICSEARCH_URL" + +PROMPT_ELASTICSEARCH_USERNAME="$ELASTICSEARCH_LABEL username" +KEY_ELASTICSEARCH_USERNAME="$SYS_KEY_SHARED_ELASTICSEARCH_USERNAME" + +PROMPT_ELASTICSEARCH_PASSWORD="$ELASTICSEARCH_LABEL password" +KEY_ELASTICSEARCH_PASSWORD="$SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD" +IS_SENSITIVE_ELASTICSEARCH_PASSWORD="$FLAG_Y" + +# Cluster related questions +MESSAGE_CLUSTER_MASTER_KEY="Provide the cluster's master key. It can be found in the data directory of the first node under /etc/security/master.key" +PROMPT_CLUSTER_MASTER_KEY="Master Key" +KEY_CLUSTER_MASTER_KEY="$SYS_KEY_SHARED_SECURITY_MASTERKEY" +IS_SENSITIVE_CLUSTER_MASTER_KEY="$FLAG_Y" + +MESSAGE_JOIN_KEY="The Join key is the secret key used to establish trust between services in the JFrog Platform.\n(You can copy the Join Key from Admin > Security > Settings)" +PROMPT_JOIN_KEY="Join Key" +KEY_JOIN_KEY="$SYS_KEY_SHARED_SECURITY_JOINKEY" +IS_SENSITIVE_JOIN_KEY="$FLAG_Y" +REGEX_JOIN_KEY="^[a-zA-Z0-9]{16,}\$" +ERROR_MESSAGE_JOIN_KEY="Invalid Join Key" + +# Rabbitmq related cluster information +MESSAGE_RABBITMQ_ACTIVE_NODE_NAME="Provide an active ${RABBITMQ_LABEL} node name. Run the command [ hostname -s ] on any of the existing nodes in the product cluster to get this" +PROMPT_RABBITMQ_ACTIVE_NODE_NAME="${RABBITMQ_LABEL} active node name" +KEY_RABBITMQ_ACTIVE_NODE_NAME="$SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME" + +# Rabbitmq related cluster information (necessary only for docker-compose) +PROMPT_RABBITMQ_ACTIVE_NODE_IP="${RABBITMQ_LABEL} active node ip" +KEY_RABBITMQ_ACTIVE_NODE_IP="$SYS_KEY_RABBITMQ_ACTIVE_NODE_IP" + +MESSAGE_JFROGURL(){ + echo -e "The JFrog URL allows ${PRODUCT_NAME} to connect to a JFrog Platform Instance.\n(You can copy the JFrog URL from Admin > Security > Settings)" +} +PROMPT_JFROGURL="JFrog URL" +KEY_JFROGURL="$SYS_KEY_SHARED_JFROGURL" +REGEX_JFROGURL="^https?://.*:{0,}[0-9]{0,4}\$" +ERROR_MESSAGE_JFROGURL="Invalid JFrog URL" + + +# Set this to FLAG_Y on upgrade +IS_UPGRADE="${FLAG_N}" + +# This belongs in JFMC but is the ONLY one that needs it so keeping it here for now. Can be made into a method and overridden if necessary +MESSAGE_MULTIPLE_PG_SCHEME="Please setup $POSTGRES_LABEL with schema as described in https://www.jfrog.com/confluence/display/JFROG/Installing+Mission+Control" + +_getMethodOutputOrVariableValue() { + unset EFFECTIVE_MESSAGE + local keyToSearch=$1 + local effectiveMessage= + local result="0" + # logSilly "Searching for method: [$keyToSearch]" + LC_ALL=C type "$keyToSearch" > /dev/null 2>&1 || result="$?" + if [[ "$result" == "0" ]]; then + # logSilly "Found method for [$keyToSearch]" + EFFECTIVE_MESSAGE="$($keyToSearch)" + return + fi + eval EFFECTIVE_MESSAGE=\${$keyToSearch} + if [ ! 
-z "$EFFECTIVE_MESSAGE" ]; then + return + fi + # logSilly "Didn't find method or variable for [$keyToSearch]" +} + + +# REF https://misc.flogisoft.com/bash/tip_colors_and_formatting +cClear="\e[0m" +cBlue="\e[38;5;69m" +cRedDull="\e[1;31m" +cYellow="\e[1;33m" +cRedBright="\e[38;5;197m" +cBold="\e[1m" + + +_loggerGetModeRaw() { + local MODE="$1" + case $MODE in + INFO) + printf "" + ;; + DEBUG) + printf "%s" "[${MODE}] " + ;; + WARN) + printf "${cRedDull}%s%s${cClear}" "[" "${MODE}" "] " + ;; + ERROR) + printf "${cRedBright}%s%s${cClear}" "[" "${MODE}" "] " + ;; + esac +} + + +_loggerGetMode() { + local MODE="$1" + case $MODE in + INFO) + printf "${cBlue}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + DEBUG) + printf "%-7s" "[${MODE}]" + ;; + WARN) + printf "${cRedDull}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + ERROR) + printf "${cRedBright}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + esac +} + +# Capitalises the first letter of the message +_loggerGetMessage() { + local originalMessage="$*" + local firstChar=$(echo "${originalMessage:0:1}" | awk '{ print toupper($0) }') + local resetOfMessage="${originalMessage:1}" + echo "$firstChar$resetOfMessage" +} + +# The spec also says content should be left-trimmed but this is not necessary in our case. We don't reach the limit. +_loggerGetStackTrace() { + printf "%s%-30s%s" "[" "$1:$2" "]" +} + +_loggerGetThread() { + printf "%s" "[main]" +} + +_loggerGetServiceType() { + printf "%s%-5s%s" "[" "shell" "]" +} + +#Trace ID is not applicable to scripts +_loggerGetTraceID() { + printf "%s" "[]" +} + +logRaw() { + echo "" + printf "$1" + echo "" +} + +logBold(){ + echo "" + printf "${cBold}$1${cClear}" + echo "" +} + +# The date binary works differently based on whether it is GNU/BSD +is_date_supported=0 +date --version > /dev/null 2>&1 || is_date_supported=1 +IS_GNU=$(echo $is_date_supported) + +_loggerGetTimestamp() { + if [ "${IS_GNU}" == "0" ]; then + echo -n $(date -u +%FT%T.%3NZ) + else + echo -n $(date -u +%FT%T.000Z) + fi +} + +# https://www.shellscript.sh/tips/spinner/ +_spin() +{ + spinner="/|\\-/|\\-" + while : + do + for i in `seq 0 7` + do + echo -n "${spinner:$i:1}" + echo -en "\010" + sleep 1 + done + done +} + +showSpinner() { + # Start the Spinner: + _spin & + # Make a note of its Process ID (PID): + SPIN_PID=$! + # Kill the spinner on any signal, including our own exit. + trap "kill -9 $SPIN_PID" `seq 0 15` &> /dev/null || return 0 +} + +stopSpinner() { + local occurrences=$(ps -ef | grep -wc "${SPIN_PID}") + let "occurrences+=0" + # validate that it is present (2 since this search itself will show up in the results) + if [ $occurrences -gt 1 ]; then + kill -9 $SPIN_PID &>/dev/null || return 0 + wait $SPIN_ID &>/dev/null + fi +} + +_getEffectiveMessage(){ + local MESSAGE="$1" + local MODE=${2-"INFO"} + + if [ -z "$CONTEXT" ]; then + CONTEXT=$(caller) + fi + + _EFFECTIVE_MESSAGE= + if [ -z "$LOG_BEHAVIOR_ADD_META" ]; then + _EFFECTIVE_MESSAGE="$(_loggerGetModeRaw $MODE)$(_loggerGetMessage $MESSAGE)" + else + local SERVICE_TYPE="script" + local TRACE_ID="" + local THREAD="main" + + local CONTEXT_LINE=$(echo "$CONTEXT" | awk '{print $1}') + local CONTEXT_FILE=$(echo "$CONTEXT" | awk -F"/" '{print $NF}') + + _EFFECTIVE_MESSAGE="$(_loggerGetTimestamp) $(_loggerGetServiceType) $(_loggerGetMode $MODE) $(_loggerGetTraceID) $(_loggerGetStackTrace $CONTEXT_FILE $CONTEXT_LINE) $(_loggerGetThread) - $(_loggerGetMessage $MESSAGE)" + fi + CONTEXT= +} + +# Important - don't call any log method from this method. Will become an infinite loop. 
Use echo to debug +_logToFile() { + local MODE=${1-"INFO"} + local targetFile="$LOG_BEHAVIOR_ADD_REDIRECTION" + # IF the file isn't passed, abort + if [ -z "$targetFile" ]; then + return + fi + # IF this is not being run in verbose mode and mode is debug or lower, abort + if [ "${VERBOSE_MODE}" != "$FLAG_Y" ] && [ "${VERBOSE_MODE}" != "true" ] && [ "${VERBOSE_MODE}" != "debug" ]; then + if [ "$MODE" == "DEBUG" ] || [ "$MODE" == "SILLY" ]; then + return + fi + fi + + # Create the file if it doesn't exist + if [ ! -f "${targetFile}" ]; then + return + # touch $targetFile > /dev/null 2>&1 || true + fi + # # Make it readable + # chmod 640 $targetFile > /dev/null 2>&1 || true + + # Log contents + printf "%s\n" "$_EFFECTIVE_MESSAGE" >> "$targetFile" || true +} + +logger() { + if [ "$LOG_BEHAVIOR_ADD_NEW_LINE" == "$FLAG_Y" ]; then + echo "" + fi + _getEffectiveMessage "$@" + local MODE=${2-"INFO"} + printf "%s\n" "$_EFFECTIVE_MESSAGE" + _logToFile "$MODE" +} + +logDebug(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "$FLAG_Y" ] || [ "${VERBOSE_MODE}" == "true" ] || [ "${VERBOSE_MODE}" == "debug" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logSilly(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "silly" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logError() { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= +} + +errorExit () { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= + exit 1 +} + +warn () { + CONTEXT=$(caller) + logger "$1" "WARN" + CONTEXT= +} + +note () { + CONTEXT=$(caller) + logger "$1" "NOTE" + CONTEXT= +} + +bannerStart() { + title=$1 + echo + echo -e "\033[1m${title}\033[0m" + echo +} + +bannerSection() { + title=$1 + echo + echo -e "******************************** ${title} ********************************" + echo +} + +bannerSubSection() { + title=$1 + echo + echo -e "************** ${title} *******************" + echo +} + +bannerMessge() { + title=$1 + echo + echo -e "********************************" + echo -e "${title}" + echo -e "********************************" + echo +} + +setRed () { + local input="$1" + echo -e \\033[31m${input}\\033[0m +} +setGreen () { + local input="$1" + echo -e \\033[32m${input}\\033[0m +} +setYellow () { + local input="$1" + echo -e \\033[33m${input}\\033[0m +} + +logger_addLinebreak () { + echo -e "---\n" +} + +bannerImportant() { + title=$1 + local bold="\033[1m" + local noColour="\033[0m" + echo + echo -e "${bold}######################################## IMPORTANT ########################################${noColour}" + echo -e "${bold}${title}${noColour}" + echo -e "${bold}###########################################################################################${noColour}" + echo +} + +bannerEnd() { + #TODO pass a title and calculate length dynamically so that start and end look alike + echo + echo "*****************************************************************************" + echo +} + +banner() { + title=$1 + content=$2 + bannerStart "${title}" + echo -e "$content" +} + +# The logic below helps us redirect content we'd normally hide to the log file. + # + # We have several commands which clutter the console with output and so use + # `cmd > /dev/null` - this redirects the command's output to null. + # + # However, the information we just hid maybe useful for support. 
Using the code pattern + # `cmd >&6` (instead of `cmd> >/dev/null` ), the command's output is hidden from the console + # but redirected to the installation log file + # + +#Default value of 6 is just null +exec 6>>/dev/null +redirectLogsToFile() { + echo "" + # local file=$1 + + # [ ! -z "${file}" ] || return 0 + + # local logDir=$(dirname "$file") + + # if [ ! -f "${file}" ]; then + # [ -d "${logDir}" ] || mkdir -p ${logDir} || \ + # ( echo "WARNING : Could not create parent directory (${logDir}) to redirect console log : ${file}" ; return 0 ) + # fi + + # #6 now points to the log file + # exec 6>>${file} + # #reference https://unix.stackexchange.com/questions/145651/using-exec-and-tee-to-redirect-logs-to-stdout-and-a-log-file-in-the-same-time + # exec 2>&1 > >(tee -a "${file}") +} + +# Check if a give key contains any sensitive string as part of it +# Based on the result, the caller can decide its value can be displayed or not +# Sample usage : isKeySensitive "${key}" && displayValue="******" || displayValue=${value} +isKeySensitive(){ + local key=$1 + local sensitiveKeys="password|secret|key|token" + + if [ -z "${key}" ]; then + return 1 + else + local lowercaseKey=$(echo "${key}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + [[ "${lowercaseKey}" =~ ${sensitiveKeys} ]] && return 0 || return 1 + fi +} + +getPrintableValueOfKey(){ + local displayValue= + local key="$1" + if [ -z "$key" ]; then + # This is actually an incorrect usage of this method but any logging will cause unexpected content in the caller + echo -n "" + return + fi + + local value="$2" + isKeySensitive "${key}" && displayValue="$SENSITIVE_KEY_VALUE" || displayValue="${value}" + echo -n $displayValue +} + +_createConsoleLog(){ + if [ -z "${JF_PRODUCT_HOME}" ]; then + return + fi + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + mkdir -p "${JF_PRODUCT_HOME}/var/log" || true + if [ ! -f ${targetFile} ]; then + touch $targetFile > /dev/null 2>&1 || true + fi + chmod 640 $targetFile > /dev/null 2>&1 || true +} + +# Output from application's logs are piped to this method. It checks a configuration variable to determine if content should be logged to +# the common console.log file +redirectServiceLogsToFile() { + + local result="0" + # check if the function getSystemValue exists + LC_ALL=C type getSystemValue > /dev/null 2>&1 || result="$?" + if [[ "$result" != "0" ]]; then + warn "Couldn't find the systemYamlHelper. Skipping log redirection" + return 0 + fi + + getSystemValue "shared.consoleLog" "NOT_SET" + if [[ "${YAML_VALUE}" == "false" ]]; then + logger "Redirection is set to false. Skipping log redirection" + return 0; + fi + + if [ -z "${JF_PRODUCT_HOME}" ] || [ "${JF_PRODUCT_HOME}" == "" ]; then + warn "JF_PRODUCT_HOME is unavailable. 
Skipping log redirection" + return 0 + fi + + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + + _createConsoleLog + + while read -r line; do + printf '%s\n' "${line}" >> $targetFile || return 0 # Don't want to log anything - might clutter the screen + done +} + +## Display environment variables starting with JF_ along with its value +## Value of sensitive keys will be displayed as "******" +## +## Sample Display : +## +## ======================== +## JF Environment variables +## ======================== +## +## JF_SHARED_NODE_ID : locahost +## JF_SHARED_JOINKEY : ****** +## +## +displayEnv() { + local JFEnv=$(printenv | grep ^JF_ 2>/dev/null) + local key= + local value= + + if [ -z "${JFEnv}" ]; then + return + fi + + cat << ENV_START_MESSAGE + +======================== +JF Environment variables +======================== +ENV_START_MESSAGE + + for entry in ${JFEnv}; do + key=$(echo "${entry}" | awk -F'=' '{print $1}') + value=$(echo "${entry}" | awk -F'=' '{print $2}') + + isKeySensitive "${key}" && value="******" || value=${value} + + printf "\n%-35s%s" "${key}" " : ${value}" + done + echo; +} + +_addLogRotateConfiguration() { + logDebug "Method ${FUNCNAME[0]}" + # mandatory inputs + local confFile="$1" + local logFile="$2" + + # Method available in _ioOperations.sh + LC_ALL=C type io_setYQPath > /dev/null 2>&1 || return 1 + + io_setYQPath + + # Method available in _systemYamlHelper.sh + LC_ALL=C type getSystemValue > /dev/null 2>&1 || return 1 + + local frequency="daily" + local archiveFolder="archived" + + local compressLogFiles= + getSystemValue "shared.logging.rotation.compress" "true" + if [[ "${YAML_VALUE}" == "true" ]]; then + compressLogFiles="compress" + fi + + getSystemValue "shared.logging.rotation.maxFiles" "10" + local noOfBackupFiles="${YAML_VALUE}" + + getSystemValue "shared.logging.rotation.maxSizeMb" "25" + local sizeOfFile="${YAML_VALUE}M" + + logDebug "Adding logrotate configuration for [$logFile] to [$confFile]" + + # Add configuration to file + local confContent=$(cat << LOGROTATECONF +$logFile { + $frequency + missingok + rotate $noOfBackupFiles + $compressLogFiles + notifempty + olddir $archiveFolder + dateext + extension .log + dateformat -%Y-%m-%d + size ${sizeOfFile} +} +LOGROTATECONF +) + echo "${confContent}" > ${confFile} || return 1 +} + +_operationIsBySameUser() { + local targetUser="$1" + local currentUserID=$(id -u) + local currentUserName=$(id -un) + + if [ $currentUserID == $targetUser ] || [ $currentUserName == $targetUser ]; then + echo -n "yes" + else + echo -n "no" + fi +} + +_addCronJobForLogrotate() { + logDebug "Method ${FUNCNAME[0]}" + + # Abort if logrotate is not available + [ "$(io_commandExists 'crontab')" != "yes" ] && warn "cron is not available" && return 1 + + # mandatory inputs + local productHome="$1" + local confFile="$2" + local cronJobOwner="$3" + + # We want to use our binary if possible. It may be more recent than the one in the OS + local logrotateBinary="$productHome/app/third-party/logrotate/logrotate" + + if [ ! -f "$logrotateBinary" ]; then + logrotateBinary="logrotate" + [ "$(io_commandExists 'logrotate')" != "yes" ] && warn "logrotate is not available" && return 1 + fi + local cmd="$logrotateBinary ${confFile} --state $productHome/var/etc/logrotate/logrotate-state" #--verbose + + id -u $cronJobOwner > /dev/null 2>&1 || { warn "User $cronJobOwner does not exist. 
Aborting logrotate configuration" && return 1; } + + # Remove the existing line + removeLogRotation "$productHome" "$cronJobOwner" || true + + # Run logrotate daily at 23:55 hours + local cronInterval="55 23 * * * $cmd" + + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + # If this is standalone mode, we cannot use -u - the user running this process may not have the necessary privileges + if [ "$standaloneMode" == "no" ]; then + (crontab -l -u $cronJobOwner 2>/dev/null; echo "$cronInterval") | crontab -u $cronJobOwner - + else + (crontab -l 2>/dev/null; echo "$cronInterval") | crontab - + fi +} + +## Configure logrotate for a product +## Failure conditions: +## If log rotation could not be set up for some reason +## Parameters: +## $1: The product name +## $2: The product home +## Depends on global: none +## Updates global: none +## Returns: NA + +configureLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + + # mandatory inputs + local productName="$1" + if [ -z $productName ]; then + warn "Incorrect usage. A product name is necessary for configuring log rotation" && return 1 + fi + + local productHome="$2" + if [ -z $productHome ]; then + warn "Incorrect usage. A product home folder is necessary for configuring log rotation" && return 1 + fi + + local logFile="${productHome}/var/log/console.log" + if [[ $(uname) == "Darwin" ]]; then + logger "Log rotation for [$logFile] has not been configured. Please set it up manually" + return 0 + fi + + local userID="$3" + if [ -z $userID ]; then + warn "Incorrect usage. A userID is necessary for configuring log rotation" && return 1 + fi + + local groupID=${4:-$userID} + local logConfigOwner=${5:-$userID} + + logDebug "Configuring log rotation as user [$userID], group [$groupID], effective cron user [$logConfigOwner]" + + local errorMessage="Could not configure logrotate. Please configure log rotation of the file: [$logFile] manually" + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + # TODO move to recursive method + createDir "${productHome}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log/archived" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + + # TODO move to recursive method + createDir "${productHome}/var/etc" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/etc/logrotate" "$logConfigOwner" || { warn "${errorMessage}" && return 1; } + + # conf file should be owned by the user running the script + createFile "${confFile}" "${logConfigOwner}" || { warn "Could not create configuration file [$confFile]" && return 1; } + + _addLogRotateConfiguration "${confFile}" "${logFile}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + _addCronJobForLogrotate "${productHome}" "${confFile}" "${logConfigOwner}" || { warn "${errorMessage}" && return 1; } +} + +_pauseExecution() { + if [ "${VERBOSE_MODE}" == "debug" ]; then + + local breakPoint="$1" + if [ ! 
-z "$breakPoint" ]; then + printf "${cBlue}Breakpoint${cClear} [$breakPoint] " + echo "" + fi + printf "${cBlue}Press enter once you are ready to continue${cClear}" + read -s choice + echo "" + fi +} + +# removeLogRotation "$productHome" "$cronJobOwner" || true +removeLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + if [[ $(uname) == "Darwin" ]]; then + logDebug "Not implemented for Darwin." + return 0 + fi + local productHome="$1" + local cronJobOwner="$2" + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + if [ "$standaloneMode" == "no" ]; then + crontab -l -u $cronJobOwner 2>/dev/null | grep -v "$confFile" | crontab -u $cronJobOwner - + else + crontab -l 2>/dev/null | grep -v "$confFile" | crontab - + fi +} + +# NOTE: This method does not check the configuration to see if redirection is necessary. +# This is intentional. If we don't redirect, tomcat logs might get redirected to a folder/file +# that does not exist, causing the service itself to not start +setupTomcatRedirection() { + logDebug "Method ${FUNCNAME[0]}" + local consoleLog="${JF_PRODUCT_HOME}/var/log/console.log" + _createConsoleLog + export CATALINA_OUT="${consoleLog}" +} + +setupScriptLogsRedirection() { + logDebug "Method ${FUNCNAME[0]}" + if [ -z "${JF_PRODUCT_HOME}" ]; then + logDebug "No JF_PRODUCT_HOME. Returning" + return + fi + # Create the console.log file if it is not already present + # _createConsoleLog || true + # # Ensure any logs (logger/logError/warn) also get redirected to the console.log + # # Using installer.log as a temparory fix. Please change this to console.log once INST-291 is fixed + export LOG_BEHAVIOR_ADD_REDIRECTION="${JF_PRODUCT_HOME}/var/log/console.log" + export LOG_BEHAVIOR_ADD_META="$FLAG_Y" +} + +# Returns Y if this method is run inside a container +isRunningInsideAContainer() { + if [ -f "/.dockerenv" ]; then + echo -n "$FLAG_Y" + else + echo -n "$FLAG_N" + fi +} + +POSTGRES_USER=999 +NGINX_USER=104 +NGINX_GROUP=107 +ES_USER=1000 +REDIS_USER=999 +MONGO_USER=999 +RABBITMQ_USER=999 +LOG_FILE_PERMISSION=640 +PID_FILE_PERMISSION=644 + +# Copy file +copyFile(){ + local source=$1 + local target=$2 + local mode=${3:-overwrite} + local enableVerbose=${4:-"${FLAG_N}"} + local verboseFlag="" + + if [ ! -z "${enableVerbose}" ] && [ "${enableVerbose}" == "${FLAG_Y}" ]; then + verboseFlag="-v" + fi + + if [[ ! 
( $source && $target ) ]]; then + warn "Source and target are mandatory to copy a file" + return 1 + fi + + if [[ -f "${target}" ]]; then + [[ "$mode" = "overwrite" ]] && ( cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}") || true + else + cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}" + fi +} + +# Copy files recursively from given source directory to destination directory +# This method will copy but will NOT overwrite +# Destination will be created if it is not available +copyFilesNoOverwrite(){ + local src=$1 + local dest=$2 + local enableVerboseCopy="${3:-${FLAG_Y}}" + + if [[ -z "${src}" || -z "${dest}" ]]; then + return + fi + + if [ -d "${src}" ] && [ "$(ls -A ${src})" ]; then + local relativeFilePath="" + local targetFilePath="" + + for file in $(find ${src} -type f 2>/dev/null) ; do + # Derive relative path and attach it to destination + # Example : + # src=/extra_config + # dest=/var/opt/jfrog/artifactory/etc + # file=/extra_config/config.xml + # relativeFilePath=config.xml + # targetFilePath=/var/opt/jfrog/artifactory/etc/config.xml + relativeFilePath=${file/${src}/} + targetFilePath=${dest}${relativeFilePath} + + createDir "$(dirname "$targetFilePath")" + copyFile "${file}" "${targetFilePath}" "no_overwrite" "${enableVerboseCopy}" + done + fi +} + +# TODO : WINDOWS ? +# Check the max open files and open processes set on the system +checkULimits () { + local minMaxOpenFiles=${1:-32000} + local minMaxOpenProcesses=${2:-1024} + local setValue=${3:-true} + local warningMsgForFiles=${4} + local warningMsgForProcesses=${5} + + logger "Checking open files and processes limits" + + local currentMaxOpenFiles=$(ulimit -n) + logger "Current max open files is $currentMaxOpenFiles" + if [ ${currentMaxOpenFiles} != "unlimited" ] && [ "$currentMaxOpenFiles" -lt "$minMaxOpenFiles" ]; then + if [ "${setValue}" ]; then + ulimit -n "${minMaxOpenFiles}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForFiles}" ] || warn "${warningMsgForFiles}" + else + errorExit "Max number of open files $currentMaxOpenFiles is too low. Cannot run the application!" + fi + fi + + local currentMaxOpenProcesses=$(ulimit -u) + logger "Current max open processes is $currentMaxOpenProcesses" + if [ "$currentMaxOpenProcesses" != "unlimited" ] && [ "$currentMaxOpenProcesses" -lt "$minMaxOpenProcesses" ]; then + if [ "${setValue}" ]; then + ulimit -u "${minMaxOpenProcesses}" >/dev/null 2>&1 || warn "Max number of open processes $currentMaxOpenProcesses is low!" + [ -z "${warningMsgForProcesses}" ] || warn "${warningMsgForProcesses}" + else + errorExit "Max number of open processes $currentMaxOpenProcesses is too low. Cannot run the application!" + fi + fi +} + +createDirs() { + local appDataDir=$1 + local serviceName=$2 + local folders="backup bootstrap data etc logs work" + + [ -z "${appDataDir}" ] && errorExit "An application directory is mandatory to create its data structure" || true + [ -z "${serviceName}" ] && errorExit "A service name is mandatory to create service data structure" || true + + for folder in ${folders} + do + folder=${appDataDir}/${folder}/${serviceName} + if [ ! 
-d "${folder}" ]; then + logger "Creating folder : ${folder}" + mkdir -p "${folder}" || errorExit "Failed to create ${folder}" + fi + done +} + + +testReadWritePermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local test_file=${dir_to_check}/test-permissions + + # Write file + if echo test > ${test_file} 1> /dev/null 2>&1; then + # Write succeeded. Testing read... + if cat ${test_file} > /dev/null; then + rm -f ${test_file} + else + error=true + fi + else + error=true + fi + + if [ ${error} == true ]; then + return 1 + else + return 0 + fi +} + +# Test directory has read/write permissions for current user +testDirectoryPermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local u_id=$(id -u) + local id_str="id ${u_id}" + + logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}" + + if ! testReadWritePermissions ${dir_to_check}; then + error=true + fi + + if [ "${error}" == true ]; then + local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check}) + logger "###########################################################" + logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}" + logger "${stat_data}" + logger "Mounted directory must have read/write permissions for user ${id_str}" + logger "###########################################################" + errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}" + fi + logger "Permissions for ${dir_to_check} are good" +} + +# Utility method to create a directory path recursively with chown feature as +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: Root directory from where the path can be created +## $2: List of recursive child directories seperated by space +## $3: user who should own the directory. Optional +## $4: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA +# +# Usage: +# createRecursiveDir "/opt/jfrog/product/var" "bootstrap tomcat lib" "user_name" "group_name" +createRecursiveDir(){ + local rootDir=$1 + local pathDirs=$2 + local user=$3 + local group=${4:-${user}} + local fullPath= + + [ ! -z "${rootDir}" ] || return 0 + + createDir "${rootDir}" "${user}" "${group}" + + [ ! -z "${pathDirs}" ] || return 0 + + fullPath=${rootDir} + + for dir in ${pathDirs}; do + fullPath=${fullPath}/${dir} + createDir "${fullPath}" "${user}" "${group}" + done +} + +# Utility method to create a directory +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: directory to create +## $2: user who should own the directory. Optional +## $3: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA + +createDir(){ + local dirName="$1" + local printMessage=no + logSilly "Method ${FUNCNAME[0]} invoked with [$dirName]" + [ -z "${dirName}" ] && return + + logDebug "Attempting to create ${dirName}" + mkdir -p "${dirName}" || errorExit "Unable to create directory: [${dirName}]" + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + # Earlier, this line would have returned 1 if it failed. Now it just warns. + # This is intentional. 
Earlier, this line would NOT be reached if the folder already existed. + # Since it will always come to this line and the script may be running as a non-root user, this method will just warn if + # setting permissions fails (so as to not affect any existing flows) + io_setOwnershipNonRecursive "$dirName" "$userID" "$groupID" || warn "Could not set owner of [$dirName] to [$userID:$groupID]" + fi + # logging message to print created dir with user and group + local logMessage=${4:-$printMessage} + if [[ "${logMessage}" == "yes" ]]; then + logger "Successfully created directory [${dirName}]. Owner: [${userID}:${groupID}]" + fi +} + +removeSoftLinkAndCreateDir () { + local dirName="$1" + local userID="$2" + local groupID="$3" + local logMessage="$4" + removeSoftLink "${dirName}" + createDir "${dirName}" "${userID}" "${groupID}" "${logMessage}" +} + +# Utility method to remove a soft link +removeSoftLink () { + local dirName="$1" + if [[ -L "${dirName}" ]]; then + targetLink=$(readlink -f "${dirName}") + logger "Removing the symlink [${dirName}] pointing to [${targetLink}]" + rm -f "${dirName}" + fi +} + +# Check Directory exist in the path +checkDirExists () { + local directoryPath="$1" + + [[ -d "${directoryPath}" ]] && echo -n "true" || echo -n "false" +} + + +# Utility method to create a file +# Failure conditions: +# Parameters: +## $1: file to create +# Depends on global: none +# Updates global: none +# Returns: NA + +createFile(){ + local fileName="$1" + logSilly "Method ${FUNCNAME[0]} [$fileName]" + [ -f "${fileName}" ] && return 0 + touch "${fileName}" || return 1 + + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + io_setOwnership "$fileName" "$userID" "$groupID" || return 1 + fi +} + +# Check File exist in the filePath +# IMPORTANT- DON'T ADD LOGGING to this method +checkFileExists () { + local filePath="$1" + + [[ -f "${filePath}" ]] && echo -n "true" || echo -n "false" +} + +# Check for directories contains any (files or sub directories) +# IMPORTANT- DON'T ADD LOGGING to this method +checkDirContents () { + local directoryPath="$1" + if [[ "$(ls -1 "${directoryPath}" | wc -l)" -gt 0 ]]; then + echo -n "true" + else + echo -n "false" + fi +} + +# Check contents exist in directory +# IMPORTANT- DON'T ADD LOGGING to this method +checkContentExists () { + local source="$1" + + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + echo -n "false" + else + echo -n "true" + fi +} + +# Resolve the variable +# IMPORTANT- DON'T ADD LOGGING to this method +evalVariable () { + local output="$1" + local input="$2" + + eval "${output}"=\${"${input}"} + eval echo \${"${output}"} +} + +# Usage: if [ "$(io_commandExists 'curl')" == "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_commandExists() { + local commandToExecute="$1" + hash "${commandToExecute}" 2>/dev/null + local rt=$? 
+ if [ "$rt" == 0 ]; then echo -n "yes"; else echo -n "no"; fi +} + +# Usage: if [ "$(io_curlExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_curlExists() { + io_commandExists "curl" +} + + +io_hasMatch() { + logSilly "Method ${FUNCNAME[0]}" + local result=0 + logDebug "Executing [echo \"$1\" | grep \"$2\" >/dev/null 2>&1]" + echo "$1" | grep "$2" >/dev/null 2>&1 || result=1 + return $result +} + +# Utility method to check if the string passed (usually a connection url) corresponds to this machine itself +# Failure conditions: None +# Parameters: +## $1: string to check against +# Depends on global: none +# Updates global: IS_LOCALHOST with value "yes/no" +# Returns: NA + +io_getIsLocalhost() { + logSilly "Method ${FUNCNAME[0]}" + IS_LOCALHOST="$FLAG_N" + local inputString="$1" + logDebug "Parsing [$inputString] to check if we are dealing with this machine itself" + + io_hasMatch "$inputString" "localhost" && { + logDebug "Found localhost. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for localhost" + + local hostIP=$(io_getPublicHostIP) + io_hasMatch "$inputString" "$hostIP" && { + logDebug "Found $hostIP. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostIP" + + local hostID=$(io_getPublicHostID) + io_hasMatch "$inputString" "$hostID" && { + logDebug "Found $hostID. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostID" + + local hostName=$(io_getPublicHostName) + io_hasMatch "$inputString" "$hostName" && { + logDebug "Found $hostName. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostName" + +} + +# Usage: if [ "$(io_tarExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_tarExists() { + io_commandExists "tar" +} + +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostIP() { + local OS_TYPE=$(uname) + local publicHostIP= + if [ "${OS_TYPE}" == "Darwin" ]; then + ipStatus=$(ifconfig en0 | grep "status" | awk '{print$2}') + if [ "${ipStatus}" == "active" ]; then + publicHostIP=$(ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}') + else + errorExit "Host IP could not be resolved!" + fi + elif [ "${OS_TYPE}" == "Linux" ]; then + publicHostIP=$(hostname -i 2>/dev/null || echo "127.0.0.1") + fi + publicHostIP=$(echo "${publicHostIP}" | awk '{print $1}') + echo -n "${publicHostIP}" +} + +# Will return the short host name (up to the first dot) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostName() { + echo -n "$(hostname -s)" +} + +# Will return the full host name (use this as much as possible) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostID() { + echo -n "$(hostname)" +} + +# Utility method to backup a file +# Failure conditions: NA +# Parameters: filePath +# Depends on global: none, +# Updates global: none +# Returns: NA +io_backupFile() { + logSilly "Method ${FUNCNAME[0]}" + fileName="$1" + if [ ! 
-f "${filePath}" ]; then + logDebug "No file: [${filePath}] to backup" + return + fi + dateTime=$(date +"%Y-%m-%d-%H-%M-%S") + targetFileName="${fileName}.backup.${dateTime}" + yes | \cp -f "$fileName" "${targetFileName}" + logger "File [${fileName}] backedup as [${targetFileName}]" +} + +# Reference https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash/4025065#4025065 +is_number() { + case "$BASH_VERSION" in + 3.1.*) + PATTERN='\^\[0-9\]+\$' + ;; + *) + PATTERN='^[0-9]+$' + ;; + esac + + [[ "$1" =~ $PATTERN ]] +} + +io_compareVersions() { + if [[ $# != 2 ]] + then + echo "Usage: min_version current minimum" + return + fi + + A="${1%%.*}" + B="${2%%.*}" + + if [[ "$A" != "$1" && "$B" != "$2" && "$A" == "$B" ]] + then + io_compareVersions "${1#*.}" "${2#*.}" + else + if is_number "$A" && is_number "$B" + then + if [[ "$A" -eq "$B" ]]; then + echo "0" + elif [[ "$A" -gt "$B" ]]; then + echo "1" + elif [[ "$A" -lt "$B" ]]; then + echo "-1" + fi + fi + fi +} + +# Reference https://stackoverflow.com/questions/369758/how-to-trim-whitespace-from-a-bash-variable +# Strip all leading and trailing spaces +# IMPORTANT- DON'T ADD LOGGING to this method +io_trim() { + local var="$1" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +# temporary function will be removing it ASAP +# search for string and replace text in file +replaceText_migration_hook () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + fi +} + +# search for string and replace text in file +replaceText () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + logDebug "Replaced [$regexString] with [$replaceText] in [$file]" + fi +} + +# search for string and prepend text in file +prependText () { + local regexString="$1" + local text="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + else + sed -i -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + fi +} + +# add text to beginning of the file +addText () { + local text="$1" + local file="$2" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + else + sed -i -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + fi +} + +io_replaceString () { + local value="$1" + local firstString="$2" + local secondString="$3" + local separator=${4:-"/"} + local updateValue= + if [[ 
$(uname) == "Darwin" ]]; then + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + else + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + fi + echo -n "${updateValue}" +} + +_findYQ() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + local parentDir="$1" + if [ -z "$parentDir" ]; then + return + fi + logDebug "Executing command [find "${parentDir}" -name third-party -type d]" + local yq=$(find "${parentDir}" -name third-party -type d) + if [ -d "${yq}/yq" ]; then + export YQ_PATH="${yq}/yq" + fi +} + + +io_setYQPath() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + if [ "$(io_commandExists 'yq')" == "yes" ]; then + return + fi + + if [ ! -z "${JF_PRODUCT_HOME}" ] && [ -d "${JF_PRODUCT_HOME}" ]; then + _findYQ "${JF_PRODUCT_HOME}" + fi + + if [ -z "${YQ_PATH}" ] && [ ! -z "${COMPOSE_HOME}" ] && [ -d "${COMPOSE_HOME}" ]; then + _findYQ "${COMPOSE_HOME}" + fi + # TODO We can remove this block after all the code is restructured. + if [ -z "${YQ_PATH}" ] && [ ! -z "${SCRIPT_HOME}" ] && [ -d "${SCRIPT_HOME}" ]; then + _findYQ "${SCRIPT_HOME}" + fi + +} + +io_getLinuxDistribution() { + LINUX_DISTRIBUTION= + + # Make sure running on Linux + [ $(uname -s) != "Linux" ] && return + + # Find out what Linux distribution we are on + + cat /etc/*-release | grep -i Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 6.x + cat /etc/issue.net | grep Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 7.x + cat /etc/*-release | grep -i centos >/dev/null 2>&1 && LINUX_DISTRIBUTION=CentOS && LINUX_DISTRIBUTION_VER="7" || true + + # OS 8.x + grep -q -i "release 8" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="8" || true + + # OS 7.x + grep -q -i "release 7" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="7" || true + + # OS 6.x + grep -q -i "release 6" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="6" || true + + cat /etc/*-release | grep -i Red | grep -i 'VERSION=7' >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat && LINUX_DISTRIBUTION_VER="7" || true + + cat /etc/*-release | grep -i debian >/dev/null 2>&1 && LINUX_DISTRIBUTION=Debian || true + + cat /etc/*-release | grep -i ubuntu >/dev/null 2>&1 && LINUX_DISTRIBUTION=Ubuntu || true +} + +## Utility method to check ownership of folders/files +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If file is not owned by the user & group +## Parameters: + ## user + ## group + ## folder to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac +io_checkOwner () { + logSilly "Method ${FUNCNAME[0]}" + local osType=$(uname) + + if [ "${osType}" != "Linux" ]; then + logDebug "Unsupported OS. Skipping check" + return 0 + fi + + local file_to_check=$1 + local user_id_to_check=$2 + + + if [ -z "$user_id_to_check" ] || [ -z "$file_to_check" ]; then + errorExit "Invalid invocation of method. 
Missing mandatory inputs" + fi + + local group_id_to_check=${3:-$user_id_to_check} + local check_user_name=${4:-"no"} + + logDebug "Checking permissions on [$file_to_check] for user [$user_id_to_check] & group [$group_id_to_check]" + + local stat= + + if [ "${check_user_name}" == "yes" ]; then + stat=( $(stat -Lc "%U %G" ${file_to_check}) ) + else + stat=( $(stat -Lc "%u %g" ${file_to_check}) ) + fi + + local user_id=${stat[0]} + local group_id=${stat[1]} + + if [[ "${user_id}" != "${user_id_to_check}" ]] || [[ "${group_id}" != "${group_id_to_check}" ]] ; then + logDebug "Ownership mismatch. [${file_to_check}] is not owned by [${user_id_to_check}:${group_id_to_check}]" + return 1 + else + return 0 + fi +} + +## Utility method to change ownership of a file/folder - NON recursive +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnershipNonRecursive() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown ${user}:${group} ${targetFile}]" + chown ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to change ownership of a file. +## IMPORTANT +## If being called on a folder, should ONLY be called for fresh folders or may cause performance issues +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnership() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown -R ${user}:${group} ${targetFile}]" + chown -R ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to create third party folder structure necessary for Postgres +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## POSTGRESQL_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createPostgresDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${POSTGRESQL_DATA_ROOT}" ] && return 0 + + logDebug "Property [${POSTGRESQL_DATA_ROOT}] exists. Proceeding" + + createDir "${POSTGRESQL_DATA_ROOT}/data" + io_setOwnership "${POSTGRESQL_DATA_ROOT}" "${POSTGRES_USER}" "${POSTGRES_USER}" || errorExit "Setting ownership of [${POSTGRESQL_DATA_ROOT}] to [${POSTGRES_USER}:${POSTGRES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Nginx +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## NGINX_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createNginxDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${NGINX_DATA_ROOT}" ] && return 0 + + logDebug "Property [${NGINX_DATA_ROOT}] exists. 
Proceeding" + + createDir "${NGINX_DATA_ROOT}" + io_setOwnership "${NGINX_DATA_ROOT}" "${NGINX_USER}" "${NGINX_GROUP}" || errorExit "Setting ownership of [${NGINX_DATA_ROOT}] to [${NGINX_USER}:${NGINX_GROUP}] failed" +} + +## Utility method to create third party folder structure necessary for ElasticSearch +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## ELASTIC_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createElasticSearchDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${ELASTIC_DATA_ROOT}" ] && return 0 + + logDebug "Property [${ELASTIC_DATA_ROOT}] exists. Proceeding" + + createDir "${ELASTIC_DATA_ROOT}/data" + io_setOwnership "${ELASTIC_DATA_ROOT}" "${ES_USER}" "${ES_USER}" || errorExit "Setting ownership of [${ELASTIC_DATA_ROOT}] to [${ES_USER}:${ES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Redis +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## REDIS_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRedisDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${REDIS_DATA_ROOT}" ] && return 0 + + logDebug "Property [${REDIS_DATA_ROOT}] exists. Proceeding" + + createDir "${REDIS_DATA_ROOT}" + io_setOwnership "${REDIS_DATA_ROOT}" "${REDIS_USER}" "${REDIS_USER}" || errorExit "Setting ownership of [${REDIS_DATA_ROOT}] to [${REDIS_USER}:${REDIS_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Mongo +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## MONGODB_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createMongoDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${MONGODB_DATA_ROOT}" ] && return 0 + + logDebug "Property [${MONGODB_DATA_ROOT}] exists. Proceeding" + + createDir "${MONGODB_DATA_ROOT}/logs" + createDir "${MONGODB_DATA_ROOT}/configdb" + createDir "${MONGODB_DATA_ROOT}/db" + io_setOwnership "${MONGODB_DATA_ROOT}" "${MONGO_USER}" "${MONGO_USER}" || errorExit "Setting ownership of [${MONGODB_DATA_ROOT}] to [${MONGO_USER}:${MONGO_USER}] failed" +} + +## Utility method to create third party folder structure necessary for RabbitMQ +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## RABBITMQ_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRabbitMQDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${RABBITMQ_DATA_ROOT}" ] && return 0 + + logDebug "Property [${RABBITMQ_DATA_ROOT}] exists. 
Proceeding" + + createDir "${RABBITMQ_DATA_ROOT}" + io_setOwnership "${RABBITMQ_DATA_ROOT}" "${RABBITMQ_USER}" "${RABBITMQ_USER}" || errorExit "Setting ownership of [${RABBITMQ_DATA_ROOT}] to [${RABBITMQ_USER}:${RABBITMQ_USER}] failed" +} + +# Add or replace a property in provided properties file +addOrReplaceProperty() { + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + local delimiter=${4:-"="} + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}\s*${delimiter}.*$" ${propertiesPath} > /dev/null 2>&1 + [ $? -ne 0 ] && echo -e "\n${propertyName}${delimiter}${propertyValue}" >> ${propertiesPath} + sed -i -e "s|^${propertyName}\s*${delimiter}.*$|${propertyName}${delimiter}${propertyValue}|g;" ${propertiesPath} +} + +# Set property only if its not set +io_setPropertyNoOverride(){ + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}:" ${propertiesPath} > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${propertyName}: ${propertyValue}" >> ${propertiesPath} || warn "Setting property ${propertyName}: ${propertyValue} in [ ${propertiesPath} ] failed" + else + logger "Skipping update of property : ${propertyName}" >&6 + fi +} + +# Add a line to a file if it doesn't already exist +addLine() { + local line_to_add=$1 + local target_file=$2 + logger "Trying to add line $1 to $2" >&6 2>&1 + cat "$target_file" | grep -F "$line_to_add" -wq >&6 2>&1 + if [ $? != 0 ]; then + logger "Line does not exist and will be added" >&6 2>&1 + echo $line_to_add >> $target_file || errorExit "Could not update $target_file" + fi +} + +# Utility method to check if a value (first paramter) exists in an array (2nd parameter) +# 1st parameter "value to find" +# 2nd parameter "The array to search in. Please pass a string with each value separated by space" +# Example: containsElement "y" "y Y n N" +containsElement () { + local searchElement=$1 + local searchArray=($2) + local found=1 + for elementInIndex in "${searchArray[@]}";do + if [[ $elementInIndex == $searchElement ]]; then + found=0 + fi + done + return $found +} + +# Utility method to get user's choice +# 1st parameter "what to ask the user" +# 2nd parameter "what choices to accept, separated by spaces" +# 3rd parameter "what is the default choice (to use if the user simply presses Enter)" +# Example 'getUserChoice "Are you feeling lucky? Punk!" "y n Y N" "y"' +getUserChoice(){ + configureLogOutput + read_timeout=${read_timeout:-0.5} + local choice="na" + local text_to_display=$1 + local choices=$2 + local default_choice=$3 + users_choice= + + until containsElement "$choice" "$choices"; do + echo "";echo ""; + sleep $read_timeout #This ensures correct placement of the question. 
+ read -p "$text_to_display :" choice + : ${choice:=$default_choice} + done + users_choice=$choice + echo -e "\n$text_to_display: $users_choice" >&6 + sleep $read_timeout #This ensures correct logging +} + +setFilePermission () { + local permission=$1 + local file=$2 + chmod "${permission}" "${file}" || warn "Setting permission ${permission} to file [ ${file} ] failed" +} + + +#setting required paths +setAppDir (){ + SCRIPT_DIR=$(dirname $0) + SCRIPT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + APP_DIR="`cd "${SCRIPT_HOME}";pwd`" +} + +ZIP_TYPE="zip" +COMPOSE_TYPE="compose" +HELM_TYPE="helm" +RPM_TYPE="rpm" +DEB_TYPE="debian" + +sourceScript () { + local file="$1" + + [ ! -z "${file}" ] || errorExit "target file is not passed to source a file" + + if [ ! -f "${file}" ]; then + errorExit "${file} file is not found" + else + source "${file}" || errorExit "Unable to source ${file}, please check if the user ${USER} has permissions to perform this action" + fi +} +# Source required helpers +initHelpers () { + local systemYamlHelper="${APP_DIR}/systemYamlHelper.sh" + local thirdPartyDir=$(find ${APP_DIR}/.. -name third-party -type d) + export YQ_PATH="${thirdPartyDir}/yq" + LIBXML2_PATH="${thirdPartyDir}/libxml2/bin/xmllint" + export LD_LIBRARY_PATH="${thirdPartyDir}/libxml2/lib" + sourceScript "${systemYamlHelper}" +} +# Check migration info yaml file available in the path +checkMigrationInfoYaml () { + + if [[ -f "${APP_DIR}/migrationHelmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationHelmInfo.yaml" + INSTALLER="${HELM_TYPE}" + elif [[ -f "${APP_DIR}/migrationZipInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationZipInfo.yaml" + INSTALLER="${ZIP_TYPE}" + elif [[ -f "${APP_DIR}/migrationRpmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationRpmInfo.yaml" + INSTALLER="${RPM_TYPE}" + elif [[ -f "${APP_DIR}/migrationDebInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationDebInfo.yaml" + INSTALLER="${DEB_TYPE}" + elif [[ -f "${APP_DIR}/migrationComposeInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationComposeInfo.yaml" + INSTALLER="${COMPOSE_TYPE}" + else + errorExit "File migration Info yaml does not exist in [${APP_DIR}]" + fi +} + +retrieveYamlValue () { + local yamlPath="$1" + local value="$2" + local output="$3" + local message="$4" + + [[ -z "${yamlPath}" ]] && errorExit "yamlPath is mandatory to get value from ${MIGRATION_SYSTEM_YAML_INFO}" + + getYamlValue "${yamlPath}" "${MIGRATION_SYSTEM_YAML_INFO}" "false" + value="${YAML_VALUE}" + if [[ -z "${value}" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "Empty value for ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + elif [[ "${output}" == "Skip" ]]; then + return + else + errorExit "${message}" + fi + fi +} + +checkEnv () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + # check Environment JF_PRODUCT_HOME is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_PRODUCT_HOME")" + if [[ -z "${NEW_DATA_DIR}" ]]; then + errorExit "Environment variable JF_PRODUCT_HOME is not set, this is required to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + getCustomDataDir_hook + NEW_DATA_DIR="${OLD_DATA_DIR}" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + else + # check Environment JF_ROOT_DATA_DIR is 
set before migration + OLD_DATA_DIR="$(evalVariable "OLD_DATA_DIR" "JF_ROOT_DATA_DIR")" + # check Environment JF_ROOT_DATA_DIR is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_ROOT_DATA_DIR")" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi + +} + +getDataDir () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}"|| "${INSTALLER}" == "${HELM_TYPE}" ]]; then + checkEnv + else + getCustomDataDir_hook + NEW_DATA_DIR="`cd "${APP_DIR}"/../../;pwd`" + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi +} + +# Retrieve Product name from MIGRATION_SYSTEM_YAML_INFO +getProduct () { + retrieveYamlValue "migration.product" "${YAML_VALUE}" "Fail" "Empty value under ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + PRODUCT="${YAML_VALUE}" + PRODUCT=$(echo "${PRODUCT}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + if [[ "${PRODUCT}" != "artifactory" && "${PRODUCT}" != "distribution" && "${PRODUCT}" != "xray" ]]; then + errorExit "migration.product in [${MIGRATION_SYSTEM_YAML_INFO}] is not correct, please set based on product as ARTIFACTORY or DISTRIBUTION" + fi + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + JF_USER="${PRODUCT}" + fi +} +# Compare product version with minProductVersion and maxProductVersion +migrateCheckVersion () { + local productVersion="$1" + local minProductVersion="$2" + local maxProductVersion="$3" + local productVersion618="6.18.0" + local unSupportedProductVersions7=("7.2.0 7.2.1") + + if [[ "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 1 ]]; then + logger "Migration not necessary. ${PRODUCT} is already ${productVersion}" + exit 11 + elif [[ "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 1 ]]; then + if [[ ("$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 1) && " ${unSupportedProductVersions7[@]} " =~ " ${CURRENT_VERSION} " ]]; then + touch /tmp/error; + errorExit "Current ${PRODUCT} version (${productVersion}) does not support migration to ${CURRENT_VERSION}" + else + bannerStart "Detected ${PRODUCT} ${productVersion}, initiating migration" + fi + else + logger "Current ${PRODUCT} ${productVersion} version is not supported for migration" + exit 1 + fi +} + +getProductVersion () { + local minProductVersion="$1" + local maxProductVersion="$2" + local newfilePath="$3" + local oldfilePath="$4" + local propertyInDocker="$5" + local property="$6" + local productVersion= + local status= + + if [[ "$INSTALLER" == "${COMPOSE_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + elif [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${propertyInDocker}" "${newfilePath}")" + status="fail" + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." 
+ exit 0 + fi + elif [[ "$INSTALLER" == "${HELM_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + else + productVersion="${CURRENT_VERSION}" + [[ -z "${productVersion}" || "${productVersion}" == "" ]] && logger "${PRODUCT} CURRENT_VERSION is not set" && exit 0 + fi + else + if [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${property}" "${newfilePath}")" + status="fail" + elif [[ -f "${oldfilePath}" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + status="success" + else + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + logger "File [${newfilePath}] not found to get current version." + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." + fi + exit 0 + fi + fi + if [[ -z "${productVersion}" || "${productVersion}" == "" ]]; then + [[ "${status}" == "success" ]] && logger "No version found in file [${oldfilePath}]." + [[ "${status}" == "fail" ]] && logger "No version found in file [${newfilePath}]." + exit 0 + fi + + migrateCheckVersion "${productVersion}" "${minProductVersion}" "${maxProductVersion}" +} + +readKey () { + local property="$1" + local file="$2" + local version= + + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! -z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + version="${value}" && check=true && break + else + check=false + fi + done < "${file}" + if [[ "${check}" == "false" ]]; then + return + fi + echo "${version}" +} + +# create Log directory +createLogDir () { + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" + fi +} + +# Creating migration log file +creationMigrateLog () { + local LOG_FILE_NAME="migration.log" + createLogDir + local MIGRATION_LOG_FILE="${NEW_DATA_DIR}/log/${LOG_FILE_NAME}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + MIGRATION_LOG_FILE="${SCRIPT_HOME}/${LOG_FILE_NAME}" + fi + touch "${MIGRATION_LOG_FILE}" + setFilePermission "${LOG_FILE_PERMISSION}" "${MIGRATION_LOG_FILE}" + exec &> >(tee -a "${MIGRATION_LOG_FILE}") +} +# Set path where system.yaml should create +setSystemYamlPath () { + SYSTEM_YAML_PATH="${NEW_DATA_DIR}/etc/system.yaml" + if [[ "${INSTALLER}" != "${HELM_TYPE}" ]]; then + logger "system.yaml will be created in path [${SYSTEM_YAML_PATH}]" + fi +} +# Create directory +createDirectory () { + local directory="$1" + local output="$2" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${directory}" + mkdir -p "${directory}" && check=true || check=false + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi + setOwnershipBasedOnInstaller "${directory}" +} + +setOwnershipBasedOnInstaller () { + local directory="$1" + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + chown -R ${USER_TO_CHECK}:${GROUP_TO_CHECK} "${directory}" || warn "Setting ownership on $directory failed" + elif [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == 
"${HELM_TYPE}" ]]; then + io_setOwnership "${directory}" "${JF_USER}" "${JF_USER}" + fi +} + +getUserAndGroup () { + local file="$1" + read uid gid <<<$(stat -c '%U %G' ${file}) + USER_TO_CHECK="${uid}" + GROUP_TO_CHECK="${gid}" +} + +# set ownership +getUserAndGroupFromFile () { + case $PRODUCT in + artifactory) + getUserAndGroup "/etc/opt/jfrog/artifactory/artifactory.properties" + ;; + distribution) + getUserAndGroup "${OLD_DATA_DIR}/etc/versions.properties" + ;; + xray) + getUserAndGroup "${OLD_DATA_DIR}/security/master.key" + ;; + esac +} + +# creating required directories +createRequiredDirs () { + bannerSubSection "CREATING REQUIRED DIRECTORIES" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${JF_USER}" "${JF_USER}" "yes" + io_setOwnership "${NEW_DATA_DIR}" "${JF_USER}" "${JF_USER}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data/postgres" "${POSTGRES_USER}" "${POSTGRES_USER}" "yes" + fi + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + fi +} + +# Check entry in map is format +checkMapEntry () { + local entry="$1" + + [[ "${entry}" != *"="* ]] && echo -n "false" || echo -n "true" +} +# Check value Empty and warn +warnIfEmpty () { + local filePath="$1" + local yamlPath="$2" + local check= + + if [[ -z "${filePath}" ]]; then + warn "Empty value in yamlpath [${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + check=false + else + check=true + fi + echo "${check}" +} + +logCopyStatus () { + local status="$1" + local logMessage="$2" + local warnMessage="$3" + + [[ "${status}" == "success" ]] && logger "${logMessage}" + [[ "${status}" == "fail" ]] && warn "${warnMessage}" +} +# copy contents from source to destination +copyCmd () { + local source="$1" + local target="$2" + local mode="$3" + local status= + + case $mode in + unique) + cp -up "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + specific) + cp -pf "${source}" "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied file [${source}] to [${target}]" "Failed to copy file [${source}] to [${target}]" + ;; + patternFiles) + cp -pf "${source}"* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied files matching 
[${source}*] to [${target}]" "Failed to copy files matching [${source}*] to [${target}]" + ;; + full) + cp -prf "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + esac +} +# Check contents exist in source before copying +copyOnContentExist () { + local source="$1" + local target="$2" + local mode="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + copyCmd "${source}" "${target}" "${mode}" + else + logger "No contents to copy from [${source}]" + fi +} + +# move source to destination +moveCmd () { + local source="$1" + local target="$2" + local status= + + mv -f "${source}" "${target}" && status="success" || status="fail" + [[ "${status}" == "success" ]] && logger "Successfully moved directory [${source}] to [${target}]" + [[ "${status}" == "fail" ]] && warn "Failed to move directory [${source}] to [${target}]" +} + +# symlink target to source +symlinkCmd () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + local check=false + + if [[ "${symlinkSubDir}" == "subDir" ]]; then + ln -sf "${source}"/* "${target}" && check=true || check=false + else + ln -sf "${source}" "${target}" && check=true || check=false + fi + + [[ "${check}" == "true" ]] && logger "Successfully symlinked directory [${target}] to old [${source}]" + [[ "${check}" == "false" ]] && warn "Symlink operation failed" +} +# Check contents exist in source before symlinking +symlinkOnExist () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + if [[ "${symlinkSubDir}" == "subDir" ]]; then + symlinkCmd "${source}" "${target}" "subDir" + else + symlinkCmd "${source}" "${target}" + fi + else + logger "No contents to symlink from [${source}]" + fi +} + +prependDir () { + local absolutePath="$1" + local fullPath="$2" + local sourcePath= + + if [[ "${absolutePath}" = \/* ]]; then + sourcePath="${absolutePath}" + else + sourcePath="${fullPath}" + fi + echo "${sourcePath}" +} + +getFirstEntry (){ + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $1}' +} + +getSecondEntry () { + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $2}' +} +# To get absolutePath +pathResolver () { + local directoryPath="$1" + local dataDir= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Warning" + dataDir="${YAML_VALUE}" + cd "${dataDir}" + else + cd "${OLD_DATA_DIR}" + fi + absoluteDir="`cd "${directoryPath}";pwd`" + echo "${absoluteDir}" +} + +checkPathResolver () { + local value="$1" + + if [[ "${value}" == \/* ]]; then + value="${value}" + else + value="$(pathResolver "${value}")" + fi + echo "${value}" +} + +propertyMigrate () { + local entry="$1" + local filePath="$2" + local fileName="$3" + local check=false + + local yamlPath="$(getFirstEntry "${entry}")" + local property="$(getSecondEntry "${entry}")" + if [[ -z "${property}" ]]; then + warn "Property is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${property}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! 
-z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + value="$(migrateResolveDerbyPath "${key}" "${value}")" + value="$(migrateResolveHaDirPath "${key}" "${value}")" + value="$(updatePostgresUrlString_Hook "${yamlPath}" "${value}")" + fi + if [[ "${key}" == "context.url" ]]; then + local ip=$(echo "${value}" | awk -F/ '{print $3}' | sed 's/:.*//') + setSystemValue "shared.node.ip" "${ip}" "${SYSTEM_YAML_PATH}" + logger "Setting [shared.node.ip] with [${ip}] in system.yaml" + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" && logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" && check=true && break || check=false + fi + done < "${NEW_DATA_DIR}/${filePath}/${fileName}" + [[ "${check}" == "false" ]] && logger "Property [${property}] not found in file [${fileName}]" +} + +setHaEnabled_hook () { + echo "" +} + +migratePropertiesFiles () { + local fileList= + local filePath= + local fileName= + local map= + + retrieveYamlValue "migration.propertyFiles.files" "fileList" "Skip" + fileList="${YAML_VALUE}" + if [[ -z "${fileList}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF PROPERTY FILES" + for file in ${fileList}; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.propertyFiles.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.propertyFiles.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + if [[ "$(checkFileExists "${NEW_DATA_DIR}/${filePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + # setting haEnabled with true only if ha-node.properties is present + setHaEnabled_hook "${filePath}" + retrieveYamlValue "migration.propertyFiles.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + propertyMigrate "${entry}" "${filePath}" "${fileName}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=property" + fi + done + else + logger "File [${fileName}] was not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} + +createTargetDir () { + local mountDir="$1" + local target="$2" + + logger "Target directory not found [${mountDir}/${target}], creating it" + createDirectoryRecursive "${mountDir}" "${target}" "Warning" +} + +createDirectoryRecursive () { + local mountDir="$1" + local target="$2" + local output="$3" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${mountDir}/${target}" + local directory=$(echo "${target}" | tr '/' ' ' ) + local targetDir="${mountDir}" + for dir in ${directory}; + do + targetDir="${targetDir}/${dir}" + mkdir -p "${targetDir}" && check=true || check=false + setOwnershipBasedOnInstaller "${targetDir}" + done + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi +} + +copyOperation () { + local source="$1" + local target="$2" + local mode="$3" + local check=false + local targetDataDir= + local targetLink= + local date= + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir 
"${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + #remove source if it is a symlink + if [[ -L "${source}" ]]; then + targetLink=$(readlink -f "${source}") + logger "Removing the symlink [${source}] pointing to [${targetLink}]" + rm -f "${source}" + source=${targetLink} + fi + if [[ "$(checkDirExists "${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path" + return + fi + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + logger "No contents to copy from [${source}]" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyOnContentExist "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copySpecificFiles () { + local source="$1" + local target="$2" + local mode="$3" + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir "${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkFileExists "${source}")" != "true" ]]; then + logger "Source file [${source}] does not exist in path" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copyPatternMatchingFiles () { + local source="$1" + local target="$2" + local mode="$3" + local sourcePath="${4}" + + # prepend OLD_DATA_DIR only if source is relative path + sourcePath="$(prependDir "${sourcePath}" "${OLD_DATA_DIR}/${sourcePath}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkDirExists "${sourcePath}")" != "true" ]]; then + logger "Source [${sourcePath}] directory not found in path" + return + fi + if ls "${sourcePath}/${source}"* 1> /dev/null 2>&1; then + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${sourcePath}/${source}" "${targetDataDir}/${target}" "${mode}" + else + logger "Source file [${sourcePath}/${source}*] does not exist in path" + fi +} + +copyLogMessage () { + local mode="$1" + case $mode in + specific) + logger "Copy file [${source}] to target [${targetDataDir}/${target}]" + ;; + patternFiles) + logger "Copy files matching [${sourcePath}/${source}*] to target [${targetDataDir}/${target}]" + ;; + full) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + unique) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + esac +} + +copyBannerMessages () { + local mode="$1" + local textMode="$2" + case $mode in + specific) + bannerSection "COPY ${textMode} FILES" + ;; + patternFiles) + bannerSection "COPY MATCHING ${textMode}" + ;; + full) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + unique) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + esac +} + +invokeCopyFunctions () { + local mode="$1" + local source="$2" + local target="$3" + + case $mode in + specific) + copySpecificFiles "${source}" "${target}" "${mode}" + ;; + 
patternFiles) + retrieveYamlValue "migration.${copyFormat}.sourcePath" "map" "Warning" + local sourcePath="${YAML_VALUE}" + copyPatternMatchingFiles "${source}" "${target}" "${mode}" "${sourcePath}" + ;; + full) + copyOperation "${source}" "${target}" "${mode}" + ;; + unique) + copyOperation "${source}" "${target}" "${mode}" + ;; + esac +} +# Copies contents from source directory and target directory +copyDataDirectories () { + local copyFormat="$1" + local mode="$2" + local map= + local source= + local target= + local textMode= + local targetDataDir= + local copyFormatValue= + + retrieveYamlValue "migration.${copyFormat}" "${copyFormat}" "Skip" + copyFormatValue="${YAML_VALUE}" + if [[ -z "${copyFormatValue}" ]]; then + return + fi + textMode=$(echo "${mode}" | tr '[:lower:]' '[:upper:]' 2>/dev/null) + copyBannerMessages "${mode}" "${textMode}" + retrieveYamlValue "migration.${copyFormat}.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeCopyFunctions "${mode}" "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +invokeMoveFunctions () { + local source="$1" + local target="$2" + local sourceDataDir= + local targetBasename= + # prepend OLD_DATA_DIR only if source is relative path + sourceDataDir=$(prependDir "${source}" "${OLD_DATA_DIR}/${source}") + targetBasename=$(dirname "${target}") + logger "Moving directory source [${sourceDataDir}] to target [${NEW_DATA_DIR}/${target}]" + if [[ "$(checkDirExists "${sourceDataDir}")" != "true" ]]; then + logger "Directory [${sourceDataDir}] not found in path to move" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${targetBasename}")" != "true" ]]; then + createTargetDir "${NEW_DATA_DIR}" "${targetBasename}" + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/${target}" + else + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/tempDir" + moveCmd "${NEW_DATA_DIR}/tempDir" "${NEW_DATA_DIR}/${target}" + fi +} + +# Move source directory and target directory +moveDirectories () { + local moveDataDirectories= + local map= + local source= + local target= + + retrieveYamlValue "migration.moveDirectories" "moveDirectories" "Skip" + moveDirectories="${YAML_VALUE}" + if [[ -z "${moveDirectories}" ]]; then + return + fi + bannerSection "MOVE DIRECTORIES" + retrieveYamlValue "migration.moveDirectories.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeMoveFunctions "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e 
target=source" + fi + echo ""; + done +} + +# Trim masterKey if its generated using hex 32 +trimMasterKey () { + local masterKeyDir=/opt/jfrog/artifactory/var/etc/security + local oldMasterKey=$(<${masterKeyDir}/master.key) + local oldMasterKey_Length=$(echo ${#oldMasterKey}) + local newMasterKey= + if [[ ${oldMasterKey_Length} -gt 32 ]]; then + bannerSection "TRIM MASTERKEY" + newMasterKey=$(echo ${oldMasterKey:0:32}) + cp ${masterKeyDir}/master.key ${masterKeyDir}/backup_master.key + logger "Original masterKey is backed up : ${masterKeyDir}/backup_master.key" + rm -rf ${masterKeyDir}/master.key + echo ${newMasterKey} > ${masterKeyDir}/master.key + logger "masterKey is trimmed : ${masterKeyDir}/master.key" + fi +} + +copyDirectories () { + + copyDataDirectories "copyFiles" "full" + copyDataDirectories "copyUniqueFiles" "unique" + copyDataDirectories "copySpecificFiles" "specific" + copyDataDirectories "copyPatternMatchingFiles" "patternFiles" +} + +symlinkDir () { + local source="$1" + local target="$2" + local targetDir= + local basename= + local targetParentDir= + + targetDir="$(dirname "${target}")" + if [[ "${targetDir}" == "${source}" ]]; then + # symlink the sub directories + createDirectory "${NEW_DATA_DIR}/${target}" "Warning" + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" "subDir" + basename="$(basename "${target}")" + cd "${NEW_DATA_DIR}/${target}" && rm -f "${basename}" + fi + else + targetParentDir="$(dirname "${NEW_DATA_DIR}/${target}")" + createDirectory "${targetParentDir}" "Warning" + if [[ "$(checkDirExists "${targetParentDir}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" + fi + fi +} + +symlinkOperation () { + local source="$1" + local target="$2" + local check=false + local targetLink= + local date= + + # Check if source is a link and do symlink + if [[ -L "${OLD_DATA_DIR}/${source}" ]]; then + targetLink=$(readlink -f "${OLD_DATA_DIR}/${source}") + symlinkOnExist "${targetLink}" "${NEW_DATA_DIR}/${target}" + else + # check if source is directory and do symlink + if [[ "$(checkDirExists "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path to symlink" + return + fi + if [[ "$(checkDirContents "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "No contents found in [${OLD_DATA_DIR}/${source}] to symlink" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" != "true" ]]; then + logger "Target directory [${NEW_DATA_DIR}/${target}] does not exist to create symlink, creating it" + symlinkDir "${source}" "${target}" + else + rm -rf "${NEW_DATA_DIR}/${target}" && check=true || check=false + [[ "${check}" == "false" ]] && warn "Failed to remove contents in [${NEW_DATA_DIR}/${target}/]" + symlinkDir "${source}" "${target}" + fi + fi +} +# Creates a symlink path - Source directory to which the symbolic link should point. 
+symlinkDirectories () { + local linkFiles= + local map= + local source= + local target= + + retrieveYamlValue "migration.linkFiles" "linkFiles" "Skip" + linkFiles="${YAML_VALUE}" + if [[ -z "${linkFiles}" ]]; then + return + fi + bannerSection "SYMLINK DIRECTORIES" + retrieveYamlValue "migration.linkFiles.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + logger "Symlink directory [${NEW_DATA_DIR}/${target}] to old [${OLD_DATA_DIR}/${source}]" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + symlinkOperation "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +updateConnectionString () { + local yamlPath="$1" + local value="$2" + local mongoPath="shared.mongo.url" + local rabbitmqPath="shared.rabbitMq.url" + local postgresPath="shared.database.url" + local redisPath="shared.redis.connectionString" + local mongoConnectionString="mongo.connectionString" + local sourceKey= + local hostIp=$(io_getPublicHostIP) + local hostKey= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + # Replace @postgres:,@mongodb:,@rabbitmq:,@redis: to @{hostIp}: (Compose Installer) + hostKey="@${hostIp}:" + case $yamlPath in + ${postgresPath}) + sourceKey="@postgres:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoPath}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${rabbitmqPath}) + sourceKey="@rabbitmq:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${redisPath}) + sourceKey="@redis:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoConnectionString}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + esac + fi + echo -n "${value}" +} + +yamlMigrate () { + local entry="$1" + local sourceFile="$2" + local value= + local yamlPath= + local key= + yamlPath="$(getFirstEntry "${entry}")" + key="$(getSecondEntry "${entry}")" + if [[ -z "${key}" ]]; then + warn "key is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + getYamlValue "${key}" "${sourceFile}" "false" + value="${YAML_VALUE}" + if [[ ! 
-z "${value}" ]]; then + value=$(updateConnectionString "${yamlPath}" "${value}") + fi + if [[ "${PRODUCT}" == "artifactory" ]]; then + replicatorProfiling + fi + if [[ -z "${value}" ]]; then + logger "No value for [${key}] in [${sourceFile}]" + else + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the key [${key}] in system.yaml" + fi +} + +migrateYamlFile () { + local files= + local filePath= + local fileName= + local sourceFile= + local map= + retrieveYamlValue "migration.yaml.files" "files" "Skip" + files="${YAML_VALUE}" + if [[ -z "${files}" ]]; then + return + fi + bannerSection "MIGRATION OF YAML FILES" + for file in $files; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.yaml.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.yaml.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + sourceFile="${NEW_DATA_DIR}/${filePath}/${fileName}" + if [[ "$(checkFileExists "${sourceFile}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + retrieveYamlValue "migration.yaml.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + yamlMigrate "${entry}" "${sourceFile}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done + else + logger "File [${fileName}] is not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} +# updates the key and value in system.yaml +updateYamlKeyValue () { + local entry="$1" + local value= + local yamlPath= + local key= + + yamlPath="$(getFirstEntry "${entry}")" + value="$(getSecondEntry "${entry}")" + if [[ -z "${value}" ]]; then + warn "value is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value [${value}] in system.yaml" +} + +updateSystemYamlFile () { + local updateYaml= + local map= + + retrieveYamlValue "migration.updateSystemYaml" "updateYaml" "Skip" + updateSystemYaml="${YAML_VALUE}" + if [[ -z "${updateSystemYaml}" ]]; then + return + fi + bannerSection "UPDATE SYSTEM YAML FILE WITH KEY AND VALUES" + retrieveYamlValue "migration.updateSystemYaml.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ -z "${map}" ]]; then + return + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + updateYamlKeyValue "${entry}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done +} + +backupFiles_hook () { + logSilly "Method ${FUNCNAME[0]}" +} + +backupDirectory () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + 
effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyOnContentExist "${targetDir}" "${backupDirectory}/${dir}" "full" + fi +} + +removeOldDirectory () { + local backupDir="$1" + local entry="$2" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${entry}" "${OLD_DATA_DIR}/${entry}")" + local outputCheckDirExists="$(checkDirExists "${targetDir}")" + if [[ "${outputCheckDirExists}" != "true" ]]; then + logger "No [${targetDir}] directory found to delete" + echo ""; + return + fi + backupDirectory "${backupDir}" "${entry}" "${targetDir}" + rm -rf "${targetDir}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed directory [${targetDir}]" + [[ "${check}" == "false" ]] && warn "Failed to remove directory [${targetDir}]" + echo ""; +} + +cleanUpOldDataDirectories () { + local cleanUpOldDataDir= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldDataDir" "cleanUpOldDataDir" "Skip" + cleanUpOldDataDir="${YAML_VALUE}" + if [[ -z "${cleanUpOldDataDir}" ]]; then + return + fi + bannerSection "CLEAN UP OLD DATA DIRECTORIES" + retrieveYamlValue "migration.cleanUpOldDataDir.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old data configurations are backedup in [${backupDir}] directory ******" + backupFiles_hook "${backupDir}/${PRODUCT}" + for entry in $map; + do + removeOldDirectory "${backupDir}" "${entry}" + done +} + +backupFiles () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local fileName="$4" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyCmd "${targetDir}/${fileName}" "${backupDirectory}/${dir}" "specific" + fi +} + +removeOldFiles () { + local backupDir="$1" + local directoryName="$2" + local fileName="$3" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${directoryName}" "${OLD_DATA_DIR}/${directoryName}")" + local outputCheckFileExists="$(checkFileExists "${targetDir}/${fileName}")" + if [[ "${outputCheckFileExists}" != "true" ]]; then + logger "No [${targetDir}/${fileName}] 
file found to delete" + return + fi + backupFiles "${backupDir}" "${directoryName}" "${targetDir}" "${fileName}" + rm -f "${targetDir}/${fileName}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed file [${targetDir}/${fileName}]" + [[ "${check}" == "false" ]] && warn "Failed to remove file [${targetDir}/${fileName}]" + echo ""; +} + +cleanUpOldFiles () { + local cleanUpFiles= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldFiles" "cleanUpOldFiles" "Skip" + cleanUpOldFiles="${YAML_VALUE}" + if [[ -z "${cleanUpOldFiles}" ]]; then + return + fi + bannerSection "CLEAN UP OLD FILES" + retrieveYamlValue "migration.cleanUpOldFiles.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old files are backedup in [${backupDir}] directory ******" + for entry in $map; + do + local outputCheckMapEntry="$(checkMapEntry "${entry}")" + if [[ "${outputCheckMapEntry}" != "true" ]]; then + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e directoryName=fileName" + fi + local fileName="$(getSecondEntry "${entry}")" + local directoryName="$(getFirstEntry "${entry}")" + [[ -z "${fileName}" ]] && warn "File name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${directoryName}" ]] && warn "Directory name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + removeOldFiles "${backupDir}" "${directoryName}" "${fileName}" + echo ""; + done +} + +startMigration () { + bannerSection "STARTING MIGRATION" +} + +endMigration () { + bannerSection "MIGRATION COMPLETED SUCCESSFULLY" +} + +initialize () { + setAppDir + _pauseExecution "setAppDir" + initHelpers + _pauseExecution "initHelpers" + checkMigrationInfoYaml + _pauseExecution "checkMigrationInfoYaml" + getProduct + _pauseExecution "getProduct" + getDataDir + _pauseExecution "getDataDir" +} + +main () { + case $PRODUCT in + artifactory) + migrateArtifactory + ;; + distribution) + migrateDistribution + ;; + xray) + migrationXray + ;; + esac + exit 0 +} + +# Ensures meta data is logged +LOG_BEHAVIOR_ADD_META="$FLAG_Y" + + +migrateResolveDerbyPath () { + local key="$1" + local value="$2" + + if [[ "${key}" == "url" && "${value}" == *"db.home"* ]]; then + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + derbyPath="/opt/jfrog/artifactory/var/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + else + derbyPath="${NEW_DATA_DIR}/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + fi + fi + echo "${value}" +} + +migrateResolveHaDirPath () { + local key="$1" + local value="$2" + + if [[ "${INSTALLER}" == "${RPM_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" || "${INSTALLER}" == "${DEB_TYPE}" ]]; then + if [[ "${key}" == "artifactory.ha.data.dir" || "${key}" == "artifactory.ha.backup.dir" ]]; then + value=$(checkPathResolver "${value}") + fi + fi + echo "${value}" +} +updatePostgresUrlString_Hook () { + local yamlPath="$1" + local value="$2" + local hostIp=$(io_getPublicHostIP) + local sourceKey="//postgresql:" + if [[ "${yamlPath}" == "shared.database.url" ]]; then + value=$(io_replaceString "${value}" "${sourceKey}" "//${hostIp}:" "#") + fi + echo "${value}" +} +# Check Artifactory product version +checkArtifactoryVersion () { + local minProductVersion="6.0.0" + 
local maxProductVersion="7.0.0" + local propertyInDocker="ARTIFACTORY_VERSION" + local property="artifactory.version" + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + local newfilePath="${APP_DIR}/../.env" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + else + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="/etc/opt/jfrog/artifactory/artifactory.properties" + fi + + getProductVersion "${minProductVersion}" "${maxProductVersion}" "${newfilePath}" "${oldfilePath}" "${propertyInDocker}" "${property}" +} + +getCustomDataDir_hook () { + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Fail" + OLD_DATA_DIR="${YAML_VALUE}" +} + +# Get protocol value of connector +getXmlConnectorProtocol () { + local i="$1" + local filePath="$2" + local fileName="$3" + local protocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@protocol' ${filePath}/${fileName} 2>/dev/null |awk -F"=" '{print $2}' | tr -d '"') + echo -e "${protocolValue}" +} + +# Get all attributes of connector +getXmlConnectorAttributes () { + local i="$1" + local filePath="$2" + local fileName="$3" + local connectorAttributes=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@*' ${filePath}/${fileName} 2>/dev/null) + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + echo "${connectorAttributes}" +} + +# Get port value of connector +getXmlConnectorPort () { + local i="$1" + local filePath="$2" + local fileName="$3" + local portValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@port' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${portValue}" +} + +# Get maxThreads value of connector +getXmlConnectorMaxThreads () { + local i="$1" + local filePath="$2" + local fileName="$3" + local maxThreadValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@maxThreads' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${maxThreadValue}" +} +# Get sendReasonPhrase value of connector +getXmlConnectorSendReasonPhrase () { + local i="$1" + local filePath="$2" + local fileName="$3" + local sendReasonPhraseValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sendReasonPhrase' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${sendReasonPhraseValue}" +} +# Get relaxedPathChars value of connector +getXmlConnectorRelaxedPathChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedPathCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedPathChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + relaxedPathCharsValue=$(io_trim "${relaxedPathCharsValue}") + echo -e "${relaxedPathCharsValue}" +} +# Get relaxedQueryChars value of connector +getXmlConnectorRelaxedQueryChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedQueryCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedQueryChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + 
relaxedQueryCharsValue=$(io_trim "${relaxedQueryCharsValue}") + echo -e "${relaxedQueryCharsValue}" +} + +# Updating system.yaml with Connector port +setConnectorPort () { + local yamlPath="$1" + local valuePort="$2" + local portYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${valuePort}" ]]; then + warn "port value is empty, could not migrate to system.yaml" + return + fi + ## Getting port yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" portYamlPath "Warning" + portYamlPath="${YAML_VALUE}" + if [[ -z "${portYamlPath}" ]]; then + return + fi + setSystemValue "${portYamlPath}" "${valuePort}" "${SYSTEM_YAML_PATH}" + logger "Setting [${portYamlPath}] with value [${valuePort}] in system.yaml" +} + +# Updating system.yaml with Connector maxThreads +setConnectorMaxThread () { + local yamlPath="$1" + local threadValue="$2" + local maxThreadYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${threadValue}" ]]; then + return + fi + ## Getting max Threads yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" maxThreadYamlPath "Warning" + maxThreadYamlPath="${YAML_VALUE}" + if [[ -z "${maxThreadYamlPath}" ]]; then + return + fi + setSystemValue "${maxThreadYamlPath}" "${threadValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${maxThreadYamlPath}] with value [${threadValue}] in system.yaml" +} + +# Updating system.yaml with Connector sendReasonPhrase +setConnectorSendReasonPhrase () { + local yamlPath="$1" + local sendReasonPhraseValue="$2" + local sendReasonPhraseYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${sendReasonPhraseValue}" ]]; then + return + fi + ## Getting sendReasonPhrase yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" sendReasonPhraseYamlPath "Warning" + sendReasonPhraseYamlPath="${YAML_VALUE}" + if [[ -z "${sendReasonPhraseYamlPath}" ]]; then + return + fi + setSystemValue "${sendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${sendReasonPhraseYamlPath}] with value [${sendReasonPhraseValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedPathChars +setConnectorRelaxedPathChars () { + local yamlPath="$1" + local relaxedPathCharsValue="$2" + local relaxedPathCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedPathCharsValue}" ]]; then + return + fi + ## Getting relaxedPathChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedPathCharsYamlPath "Warning" + relaxedPathCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedPathCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedPathCharsYamlPath}" "${relaxedPathCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedPathCharsYamlPath}] with value [${relaxedPathCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedQueryChars +setConnectorRelaxedQueryChars () { + local yamlPath="$1" + local relaxedQueryCharsValue="$2" + local relaxedQueryCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedQueryCharsValue}" ]]; then + return + fi + ## Getting relaxedQueryChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedQueryCharsYamlPath "Warning" + relaxedQueryCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedQueryCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedQueryCharsYamlPath}" "${relaxedQueryCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedQueryCharsYamlPath}] with value 
[${relaxedQueryCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connectors configurations +setConnectorExtraConfig () { + local yamlPath="$1" + local connectorAttributes="$2" + local extraConfigPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${connectorAttributes}" ]]; then + return + fi + ## Getting extraConfig yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConfig "Warning" + extraConfigPath="${YAML_VALUE}" + if [[ -z "${extraConfigPath}" ]]; then + return + fi + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setSystemValue "${extraConfigPath}" "${connectorAttributes}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConfigPath}] with connector attributes in system.yaml" +} + +# Updating system.yaml with extra Connectors +setExtraConnector () { + local yamlPath="$1" + local extraConnector="$2" + local extraConnectorYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${extraConnector}" ]]; then + return + fi + ## Getting extraConnecotr yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConnectorYamlPath "Warning" + extraConnectorYamlPath="${YAML_VALUE}" + if [[ -z "${extraConnectorYamlPath}" ]]; then + return + fi + getYamlValue "${extraConnectorYamlPath}" "${SYSTEM_YAML_PATH}" "false" + local connectorExtra="${YAML_VALUE}" + if [[ -z "${connectorExtra}" ]]; then + setSystemValue "${extraConnectorYamlPath}" "${extraConnector}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + else + setSystemValue "${extraConnectorYamlPath}" "\"${connectorExtra} ${extraConnector}\"" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + fi +} + +# Migrate extra connectors to system.yaml +migrateExtraConnectors () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local excludeDefaultPort="$4" + local i="$5" + local extraConfig= + local extraConnector= + if [[ "${excludeDefaultPort}" == "yes" ]]; then + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" && "${portValue}" != "${DEFAULT_RT_PORT}" ]] || continue + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + done + else + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + fi +} + +# Migrate connector configurations +migrateConnectorConfig () { + local i="$1" + local protocolType="$2" + local portValue="$3" + local connectorPortYamlPath="$4" + local connectorMaxThreadYamlPath="$5" + local connectorAttributesYamlPath="$6" + local filePath="$7" + local fileName="$8" + local connectorSendReasonPhraseYamlPath="$9" + local connectorRelaxedPathCharsYamlPath="${10}" + local connectorRelaxedQueryCharsYamlPath="${11}" + + # migrate port + setConnectorPort "${connectorPortYamlPath}" "${portValue}" + + # migrate maxThreads + local maxThreadValue=$(getXmlConnectorMaxThreads "$i" "${filePath}" "${fileName}") + setConnectorMaxThread "${connectorMaxThreadYamlPath}" "${maxThreadValue}" + + # migrate sendReasonPhrase + local sendReasonPhraseValue=$(getXmlConnectorSendReasonPhrase "$i" "${filePath}" "${fileName}") + 
setConnectorSendReasonPhrase "${connectorSendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" + + # migrate relaxedPathChars + local relaxedPathCharsValue=$(getXmlConnectorRelaxedPathChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedPathChars "${connectorRelaxedPathCharsYamlPath}" "\"${relaxedPathCharsValue}\"" + # migrate relaxedQueryChars + local relaxedQueryCharsValue=$(getXmlConnectorRelaxedQueryChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedQueryChars "${connectorRelaxedQueryCharsYamlPath}" "\"${relaxedQueryCharsValue}\"" + + # migrate all attributes to extra config except port , maxThread , sendReasonPhrase ,relaxedPathChars and relaxedQueryChars + local connectorAttributes=$(getXmlConnectorAttributes "$i" "${filePath}" "${fileName}") + connectorAttributes=$(echo "${connectorAttributes}" | sed 's/port="'${portValue}'"//g' | sed 's/maxThreads="'${maxThreadValue}'"//g' | sed 's/sendReasonPhrase="'${sendReasonPhraseValue}'"//g' | sed 's/relaxedPathChars="\'${relaxedPathCharsValue}'\"//g' | sed 's/relaxedQueryChars="\'${relaxedQueryCharsValue}'\"//g') + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setConnectorExtraConfig "${connectorAttributesYamlPath}" "${connectorAttributes}" +} + +# Check for default port 8040 and 8081 in connectors and migrate +migrateConnectorPort () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + local connectorPortYamlPath="$5" + local connectorMaxThreadYamlPath="$6" + local connectorAttributesYamlPath="$7" + local connectorSendReasonPhraseYamlPath="$8" + local connectorRelaxedPathCharsYamlPath="$9" + local connectorRelaxedQueryCharsYamlPath="${10}" + local portYamlPath= + local maxThreadYamlPath= + local status= + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" == *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + RT_DEFAULTPORT_STATUS=success + else + AC_DEFAULTPORT_STATUS=success + fi + migrateConnectorConfig "${i}" "${protocolType}" "${portValue}" "${connectorPortYamlPath}" "${connectorMaxThreadYamlPath}" "${connectorAttributesYamlPath}" "${filePath}" "${fileName}" "${connectorSendReasonPhraseYamlPath}" "${connectorRelaxedPathCharsYamlPath}" "${connectorRelaxedQueryCharsYamlPath}" + done +} + +# migrate to extra, connector having default port and protocol is AJP +migrateDefaultPortIfAjp () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + done + +} + +# Comparing max threads in connectors +compareMaxThreads () { + local firstConnectorMaxThread="$1" + local firstConnectorNode="$2" + local secondConnectorMaxThread="$3" + local secondConnectorNode="$4" + local filePath="$5" + local fileName="$6" + + # choose higher maxThreads connector as Artifactory. 
+ if [[ "${firstConnectorMaxThread}" -gt ${secondConnectorMaxThread} || "${firstConnectorMaxThread}" -eq ${secondConnectorMaxThread} ]]; then + # maxThread is higher in firstConnector, + # Taking firstConnector as Artifactory and SecondConnector as Access + # maxThread is equal in both connector,considering firstConnector as Artifactory and SecondConnector as Access + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + else + # maxThread is higher in SecondConnector, + # Taking SecondConnector as Artifactory and firstConnector as Access + local rtPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +# Check max threads exist to compare +maxThreadsExistToCompare () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local firstConnectorMaxThread= + local secondConnectorMaxThread= + local firstConnectorNode= + local secondConnectorNode= + local status=success + local firstnode=fail + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ ${protocolType} == *AJP* ]]; then + # Migrate Connectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + fi + # store maxthreads value of each connector + if [[ ${firstnode} == "fail" ]]; then + firstConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + firstConnectorNode="${i}" + firstnode=success + else + secondConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + secondConnectorNode="${i}" + fi + done + [[ -z "${firstConnectorMaxThread}" ]] && status=fail + [[ -z "${secondConnectorMaxThread}" ]] && status=fail + # maxThreads is set, now compare MaxThreads + if [[ "${status}" == "success" ]]; then + compareMaxThreads "${firstConnectorMaxThread}" "${firstConnectorNode}" "${secondConnectorMaxThread}" "${secondConnectorNode}" "${filePath}" "${fileName}" + else + # Assume first connector is RT, maxThreads is not set in both connectors + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" 
"${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +migrateExtraBasedOnNonAjpCount () { + local nonAjpCount="$1" + local filePath="$2" + local fileName="$3" + local connectorCount="$4" + local i="$5" + + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ "${protocolType}" == *AJP* ]]; then + if [[ "${nonAjpCount}" -eq 1 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + else + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + continue + fi + fi +} + +# find RT and AC Connector +findRtAndAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local initialAjpCount=0 + local nonAjpCount=0 + + # get the count of non AJP + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] || continue + nonAjpCount=$((initialAjpCount+1)) + initialAjpCount="${nonAjpCount}" + done + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access and artifactory connectors + # Mark port as 8040 for access + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + done + elif [[ "${nonAjpCount}" -eq 2 ]]; then + # compare maxThreads in both connectors + maxThreadsExistToCompare "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${nonAjpCount}" -gt 2 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # setting with default port in system.yaml + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# get the count of non AJP +getCountOfNonAjp () { + local port="$1" + local connectorCount="$2" + local filePath=$3 + local fileName=$4 + local initialNonAjpCount=0 + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" 
"${filePath}" "${fileName}") + [[ "${portValue}" != "${port}" ]] || continue + [[ "${protocolType}" != *AJP* ]] || continue + local nonAjpCount=$((initialNonAjpCount+1)) + initialNonAjpCount="${nonAjpCount}" + done + echo -e "${nonAjpCount}" +} + +# Find for access connector +findAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_RT_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access connector and mark port as that of connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take RT properties into access with 8040 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add RT connector details as access connector and mark port as 8040 + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# Find for artifactory connector +findRtConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_ACCESS_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as RT connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take access properties into artifactory with 8081 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + 
if [[ "${portValue}" == "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add access connector details as RT connector and mark as ${DEFAULT_RT_PORT} + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +checkForTlsConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + local sslProtocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sslProtocol' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${sslProtocolValue}" == "TLS" ]]; then + bannerImportant "NOTE: Ignoring TLS connector during migration, modify the system yaml to enable TLS. Original server.xml is saved in path [${filePath}/${fileName}]" + TLS_CONNECTOR_EXISTS=${FLAG_Y} + continue + fi + done +} + +# set custom tomcat server Listeners to system.yaml +setListenerConnector () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + for ((i = 1 ; i <= "${listenerCount}" ; i++)) + do + local listenerConnector=$($LIBXML2_PATH --xpath '//Server/Listener['$i']' ${filePath}/${fileName} 2>/dev/null) + local listenerClassName=$($LIBXML2_PATH --xpath '//Server/Listener['$i']/@className' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${listenerClassName}" == *Apr* ]]; then + setExtraConnector "${EXTRA_LISTENER_CONFIG_YAMLPATH}" "${listenerConnector}" + fi + done +} +# add custom tomcat server Listeners +addTomcatServerListeners () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + if [[ "${listenerCount}" == "0" ]]; then + logger "No listener connectors found in the [${filePath}/${fileName}],skipping migration of listener connectors" + else + setListenerConnector "${filePath}" "${fileName}" "${listenerCount}" + setSystemValue "${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}" "true" "${SYSTEM_YAML_PATH}" + logger "Setting [${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}] with value [true] in system.yaml" + fi +} + +# server.xml migration operations +xmlMigrateOperation () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local listenerCount="$4" + RT_DEFAULTPORT_STATUS=fail + AC_DEFAULTPORT_STATUS=fail + TLS_CONNECTOR_EXISTS=${FLAG_N} + + # Check for connector with TLS , if found ignore migrating it + checkForTlsConnector "${filePath}" "${fileName}" "${connectorCount}" + if [[ "${TLS_CONNECTOR_EXISTS}" == "${FLAG_Y}" ]]; then + return + fi + addTomcatServerListeners "${filePath}" "${fileName}" "${listenerCount}" + # Migrate RT default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" 
"${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + # Migrate to extra if RT default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" + # Migrate AC default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + # Migrate to extra if access default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" + + if [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # RT and AC default port found + logger "Artifactory 8081 and Access 8040 default port are found" + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # Only AC default port found,find RT connector + logger "Found Access default 8040 port" + findRtConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # Only RT default port found,find AC connector + logger "Found Artifactory default 8081 port" + findAcConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # RT and AC default port not found, find connector + logger "Artifactory 8081 and Access 8040 default port are not found" + findRtAndAcConnector "${filePath}" "${fileName}" "${connectorCount}" + fi +} + +# get count of connectors +getXmlConnectorCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Service/Connector)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# get count of listener connectors +getTomcatServerListenersCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Listener)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# Migrate server.xml configuration to system.yaml +migrateXmlFile () { + local xmlFiles= + local fileName= + local filePath= + local sourceFilePath= + DEFAULT_ACCESS_PORT="8040" + DEFAULT_RT_PORT="8081" + AC_PORT_YAMLPATH="migration.xmlFiles.serverXml.access.port" + AC_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.access.maxThreads" + AC_SENDREASONPHRASE_YAMLPATH="migration.xmlFiles.serverXml.access.sendReasonPhrase" + AC_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.access.extraConfig" + RT_PORT_YAMLPATH="migration.xmlFiles.serverXml.artifactory.port" + RT_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.artifactory.maxThreads" + RT_SENDREASONPHRASE_YAMLPATH='migration.xmlFiles.serverXml.artifactory.sendReasonPhrase' + RT_RELAXEDPATHCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedPathChars' + RT_RELAXEDQUERYCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedQueryChars' + RT_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.artifactory.extraConfig" + ROUTER_PORT_YAMLPATH="migration.xmlFiles.serverXml.router.port" + EXTRA_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.config" + EXTRA_LISTENER_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.listener" + RT_TOMCAT_HTTPSCONNECTOR_ENABLED="artifactory.tomcat.httpsConnector.enabled" + + retrieveYamlValue "migration.xmlFiles" "xmlFiles" "Skip" + xmlFiles="${YAML_VALUE}" 
+ if [[ -z "${xmlFiles}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF XML FILES" + retrieveYamlValue "migration.xmlFiles.serverXml.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + if [[ -z "${fileName}" ]]; then + return + fi + bannerSubSection "Processing Migration of $fileName" + retrieveYamlValue "migration.xmlFiles.serverXml.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + if [[ -z "${filePath}" ]]; then + return + fi + # prepend NEW_DATA_DIR only if filePath is relative path + sourceFilePath=$(prependDir "${filePath}" "${NEW_DATA_DIR}/${filePath}") + if [[ "$(checkFileExists "${sourceFilePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] is found in path [${sourceFilePath}]" + local connectorCount=$(getXmlConnectorCount "${sourceFilePath}" "${fileName}") + if [[ "${connectorCount}" == "0" ]]; then + logger "No connectors found in the [${filePath}/${fileName}],skipping migration of xml configuration" + return + fi + local listenerCount=$(getTomcatServerListenersCount "${sourceFilePath}" "${fileName}") + xmlMigrateOperation "${sourceFilePath}" "${fileName}" "${connectorCount}" "${listenerCount}" + else + logger "File [${fileName}] is not found in path [${sourceFilePath}] to migrate" + fi +} + +compareArtifactoryUser () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + + if [[ "${oldPropertyValue}" != "${newPropertyValue}" ]]; then + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" + else + logger "No change in property [${property}] value in [${sourceFile}] to migrate" + fi +} + +migrateReplicator () { + local property="$1" + local oldPropertyValue="$2" + local yamlPath="$3" + + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" +} + +compareJavaOptions () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + local oldJavaOption= + local newJavaOption= + local extraJavaOption= + local check=false + local success=true + local status=true + + oldJavaOption=$(echo "${oldPropertyValue}" | awk 'BEGIN{FS=OFS="\""}{for(i=2;i= 3.1.0" + ## Description / note + description: This CVE needs to be fixed in the alpine base image of nginx container. + diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/NOTES.txt b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/NOTES.txt new file mode 100644 index 000000000..019ed74df --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/NOTES.txt @@ -0,0 +1,118 @@ +Congratulations. You have just deployed JFrog Artifactory HA! 
+ +{{- if .Values.artifactory.masterKey }} +{{- if and (not .Values.artifactory.masterKeySecretName) (eq .Values.artifactory.masterKey "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") }} + + +***************************************** WARNING ****************************************** +* Your Artifactory master key is still set to the provided example: * +* artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF * +* * +* You should change this to your own generated key: * +* $ export MASTER_KEY=$(openssl rand -hex 32) * +* $ echo ${MASTER_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.masterKey=${MASTER_KEY}' * +* * +* Alternatively, you can use a pre-existing secret with a key called master-key with * +* '--set artifactory.masterKeySecretName=${SECRET_NAME}' * +******************************************************************************************** +{{- end }} +{{- end }} + +{{- if .Values.artifactory.joinKey }} +{{- if eq .Values.artifactory.joinKey "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" }} + + +***************************************** WARNING ****************************************** +* Your Artifactory join key is still set to the provided example: * +* artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE * +* * +* You should change this to your own generated key: * +* $ export JOIN_KEY=$(openssl rand -hex 32) * +* $ echo ${JOIN_KEY} * +* * +* Pass the created join key to helm with '--set artifactory.joinKey=${JOIN_KEY}' * +* * +******************************************************************************************** +{{- end }} +{{- end }} + + +{{- if .Values.postgresql.enabled }} + +DATABASE: +To extract the database password, run the following: +export DB_PASSWORD=$(kubectl get --namespace {{ .Release.Namespace }} $(kubectl get secret --namespace {{ .Release.Namespace }} -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +echo ${DB_PASSWORD} +{{- end }} + +SETUP: +1. Get the Artifactory IP and URL + + {{- if contains "NodePort" .Values.nginx.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "artifactory-ha.nginx.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + + {{- else if contains "LoadBalancer" .Values.nginx.service.type }} + NOTE: It may take a few minutes for the LoadBalancer public IP to be available! + + You can watch the status of the service by running 'kubectl get svc -w {{ template "artifactory-ha.nginx.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.nginx.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ + + {{- else if contains "ClusterIP" .Values.nginx.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 8080:80 + echo http://127.0.0.1:8080 + + {{- end }} + +2. Open Artifactory in your browser + Default credentials for Artifactory: + user: admin + password: password + + {{- if .Values.artifactory.license.secret }} + +3. Artifactory license(s) is deployed as a Kubernetes secret.
This method is relevant for initial deployment only! + Updating the license should be done via Artifactory UI or REST API. If you want to keep managing the Artifactory license using the same method, you can use artifactory.copyOnEveryStartup in values.yaml. + + {{- else }} + +3. Add HA licenses to activate Artifactory HA through the Artifactory UI + NOTE: Each Artifactory node requires a valid license. See https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup for more details. + + {{- end }} + +{{ if or .Values.artifactory.primary.javaOpts.jmx.enabled .Values.artifactory.node.javaOpts.jmx.enabled }} +JMX configuration: +{{- if not (contains "LoadBalancer" .Values.artifactory.service.type) }} +If you want to access JMX from your computer with jconsole, you should set ".Values.artifactory.service.type=LoadBalancer" !!! +{{ end }} + +1. Get the Artifactory service IP: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +export PRIMARY_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.primary.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +export MEMBER_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} + +2. Map the service name to the service IP in /etc/hosts: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${PRIMARY_SERVICE_IP} {{ template "artifactory-ha.primary.name" . }}\" >> /etc/hosts" +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${MEMBER_SERVICE_IP} {{ template "artifactory-ha.fullname" . }}\" >> /etc/hosts" +{{- end }} + +3. Launch jconsole: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.primary.javaOpts.jmx.port }} +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.fullname" . }}:{{ .Values.artifactory.node.javaOpts.jmx.port }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/_helpers.tpl new file mode 100644 index 000000000..f8417e945 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/_helpers.tpl @@ -0,0 +1,302 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "artifactory-ha.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The primary node name +*/}} +{{- define "artifactory-ha.primary.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-primary" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-primary" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +The member node name +*/}} +{{- define "artifactory-ha.node.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-member" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-member" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the nginx service.
+*/}} +{{- define "artifactory-ha.nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified Replicator app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.replicator.fullname" -}} +{{- if .Values.artifactory.replicator.ingress.name -}} +{{- .Values.artifactory.replicator.ingress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified replicator tracker ingress name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.replicator.tracker.fullname" -}} +{{- if .Values.artifactory.replicator.trackerIngress.name -}} +{{- .Values.artifactory.replicator.trackerIngress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication-tracker" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.nginx.fullname" -}} +{{- if .Values.nginx.fullnameOverride -}} +{{- .Values.nginx.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nginx.name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory-ha.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory-ha.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory-ha.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory-ha.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory-ha.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory-ha.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory-ha.name" . 
) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Scheme (http/https) based on Access TLS enabled/disabled +*/}} +{{- define "artifactory-ha.scheme" -}} +{{- if .Values.access.accessConfig.security.tls -}} +{{- printf "%s" "https" -}} +{{- else -}} +{{- printf "%s" "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKey value +*/}} +{{- define "artifactory-ha.joinKey" -}} +{{- if .Values.global.joinKey -}} +{{- .Values.global.joinKey -}} +{{- else if .Values.artifactory.joinKey -}} +{{- .Values.artifactory.joinKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKey value +*/}} +{{- define "artifactory-ha.masterKey" -}} +{{- if .Values.global.masterKey -}} +{{- .Values.global.masterKey -}} +{{- else if .Values.artifactory.masterKey -}} +{{- .Values.artifactory.masterKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKeySecretName value +*/}} +{{- define "artifactory-ha.joinKeySecretName" -}} +{{- if .Values.global.joinKeySecretName -}} +{{- .Values.global.joinKeySecretName -}} +{{- else if .Values.artifactory.joinKeySecretName -}} +{{- .Values.artifactory.joinKeySecretName -}} +{{- else -}} +{{ include "artifactory-ha.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKeySecretName value +*/}} +{{- define "artifactory-ha.masterKeySecretName" -}} +{{- if .Values.global.masterKeySecretName -}} +{{- .Values.global.masterKeySecretName -}} +{{- else if .Values.artifactory.masterKeySecretName -}} +{{- .Values.artifactory.masterKeySecretName -}} +{{- else -}} +{{ include "artifactory-ha.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve imagePullSecrets value +*/}} +{{- define "artifactory-ha.imagePullSecrets" -}} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainersBegin value +*/}} +{{- define "artifactory-ha.customInitContainersBegin" -}} +{{- if .Values.global.customInitContainersBegin -}} +{{- .Values.global.customInitContainersBegin -}} +{{- else if .Values.artifactory.customInitContainersBegin -}} +{{- .Values.artifactory.customInitContainersBegin -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainers value +*/}} +{{- define "artifactory-ha.customInitContainers" -}} +{{- if .Values.global.customInitContainers -}} +{{- .Values.global.customInitContainers -}} +{{- else if .Values.artifactory.customInitContainers -}} +{{- .Values.artifactory.customInitContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumes value +*/}} +{{- define "artifactory-ha.customVolumes" -}} +{{- if .Values.global.customVolumes -}} +{{- .Values.global.customVolumes -}} +{{- else if .Values.artifactory.customVolumes -}} +{{- .Values.artifactory.customVolumes -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumeMounts value +*/}} +{{- define "artifactory-ha.customVolumeMounts" -}} +{{- if .Values.global.customVolumeMounts -}} +{{- .Values.global.customVolumeMounts -}} +{{- else if .Values.artifactory.customVolumeMounts -}} +{{- .Values.artifactory.customVolumeMounts -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customSidecarContainers value +*/}} +{{- define "artifactory-ha.customSidecarContainers" -}} +{{- if .Values.global.customSidecarContainers -}} +{{- .Values.global.customSidecarContainers -}} +{{- else if .Values.artifactory.customSidecarContainers -}} +{{- .Values.artifactory.customSidecarContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory chart image names +*/}} +{{- define "artifactory-ha.getImageInfoByValue" -}} +{{- $dot := index . 0 }} +{{- $indexReference := index . 1 }} +{{- $registryName := index $dot.Values $indexReference "image" "registry" -}} +{{- $repositoryName := index $dot.Values $indexReference "image" "repository" -}} +{{- $tag := default $dot.Chart.AppVersion (index $dot.Values $indexReference "image" "tag") | toString -}} +{{- if $dot.Values.global }} + {{- if and $dot.Values.global.versions.artifactory (or (eq $indexReference "artifactory") (eq $indexReference "nginx") ) }} + {{- $tag = $dot.Values.global.versions.artifactory | toString -}} + {{- end -}} + {{- if $dot.Values.global.imageRegistry }} + {{- printf "%s/%s:%s" $dot.Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory app version +*/}} +{{- define "artifactory-ha.app.version" -}} +{{- $image := split ":" ((include "artifactory-ha.getImageInfoByValue" (list . 
"artifactory")) | toString) -}} +{{- $tag := $image._1 -}} +{{- printf "%s" $tag -}} +{{- end -}} + +{{/* +Custom certificate copy command +*/}} +{{- define "artifactory-ha.copyCustomCerts" -}} +echo "Copy custom certificates to {{ .Values.artifactory.persistence.mountPath }}/etc/security/keys/trusted"; +mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security/keys/trusted; +find /tmp/certs -type f -not -name "*.key" -exec cp -v {} {{ .Values.artifactory.persistence.mountPath }}/etc/security/keys/trusted \;; +find {{ .Values.artifactory.persistence.mountPath }}/etc/security/keys/trusted/ -type f -name "tls.crt" -exec mv -v {} {{ .Values.artifactory.persistence.mountPath }}/etc/security/keys/trusted/ca.crt \;; +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/additional-resources.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/additional-resources.yaml new file mode 100644 index 000000000..c4d06f08a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/additional-resources.yaml @@ -0,0 +1,3 @@ +{{ if .Values.additionalResources }} +{{ tpl .Values.additionalResources . }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/admin-bootstrap-creds.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/admin-bootstrap-creds.yaml new file mode 100644 index 000000000..b344e86b6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/admin-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} +{{- if .Values.artifactory.admin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "%s@%s=%s" .Values.artifactory.admin.username .Values.artifactory.admin.ip .Values.artifactory.admin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-access-config.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-access-config.yaml new file mode 100644 index 000000000..4eac505bc --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-access-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.access.accessConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-access-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + access.config.patch.yml: | +{{ tpl (toYaml .Values.access.accessConfig) . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-binarystore-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-binarystore-secret.yaml new file mode 100644 index 000000000..2e7cac758 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-binarystore + labels: + app: {{ template "artifactory-ha.name" . 
}} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-configmaps.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-configmaps.yaml new file mode 100644 index 000000000..1385bc578 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + labels: + app: {{ template "artifactory-ha.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . | indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-custom-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-custom-secrets.yaml new file mode 100644 index 000000000..67473fc58 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.artifactory.customSecrets }} +{{- range .Values.artifactory.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + component: "{{ $.Values.artifactory.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-database-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-database-secrets.yaml new file mode 100644 index 000000000..9ff71855f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-database-secrets.yaml @@ -0,0 +1,22 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-database-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . 
$ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-gcp-credentials-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-gcp-credentials-secret.yaml new file mode 100644 index 000000000..c6a2682c8 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-gcp-credentials-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} +{{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-gcpcreds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + gcp.credentials.json: |- +{{ tpl .Values.artifactory.persistence.googleStorage.gcpServiceAccount.config . | indent 4 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-installer-info.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-installer-info.yaml new file mode 100644 index 000000000..e58ec41b3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-installer-info.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + {{ tpl .Values.installerInfo . }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-license-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-license-secret.yaml new file mode 100644 index 000000000..3f629c6e4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-license + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-migration-scripts.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-migration-scripts.yaml new file mode 100644 index 000000000..fe40f980f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-migration-scripts.yaml @@ -0,0 +1,18 @@ +{{- if .Values.artifactory.migration.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + migrate.sh: | +{{ .Files.Get "files/migrate.sh" | indent 4 }} + migrationHelmInfo.yaml: | +{{ .Files.Get "files/migrationHelmInfo.yaml" | indent 4 }} + migrationStatus.sh: | +{{ .Files.Get "files/migrationStatus.sh" | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-networkpolicy.yaml new file mode 100644 index 000000000..371dc9a5f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-nfs-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-nfs-pvc.yaml new file mode 100644 index 000000000..6ed7d82f6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-nfs-pvc.yaml @@ -0,0 +1,101 @@ +{{- if eq .Values.artifactory.persistence.type "nfs" }} +### Artifactory HA data +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-data-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haDataMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-data-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +--- +### Artifactory HA backup +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . 
}}-backup-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-backup-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haBackupMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-backup-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-pdb.yaml new file mode 100644 index 000000000..cb45027cf --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.artifactory.node.minAvailable -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-node + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.artifactory.name }} + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.node.minAvailable }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-statefulset.yaml new file mode 100644 index 000000000..18f5de1ff --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-node-statefulset.yaml @@ -0,0 +1,777 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.node.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.node.name" . }} + replicas: {{ .Values.artifactory.node.replicaCount }} + updateStrategy: {{- toYaml .Values.artifactory.node.updateStrategy | nindent 4}} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . 
}} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.node.name" . }} + heritage: {{ .Release.Service }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/database-secrets: {{ include (print $.Template.BasePath "/artifactory-database-secrets.yaml") . | sha256sum }} + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.setSecurityContext }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.gid }} + {{- end }} + initContainers: + {{- if or .Values.artifactory.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "artifactory-ha.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled }} + - name: "wait-for-primary" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + echo "Waiting for primary node to be ready..."; + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled .Values.artifactory.node.waitForPrimaryStartup.time }} + echo "Sleeping to allow time for primary node to come up"; + sleep {{ .Values.artifactory.node.waitForPrimaryStartup.time }}; + {{- else }} + while [ "$(wget --spider --no-check-certificate -S -T 3 {{ include "artifactory-ha.scheme" . }}://{{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.externalPort }}/ 2>&1 | grep '^ HTTP/' | awk '{print $2}')" != "200" ]; + do echo "Primary not ready. Waiting..."; sleep 3; + done; + echo "Primary node ready!"; + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + echo "Removing join.key file"; + rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/security/join.key; + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys - load from database"; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Load custom certificates from database"; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + env: + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.masterKeySecretName" . 
}} + key: master-key + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else if .Values.artifactory.systemYaml }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- end }} + {{- if and .Values.artifactory.customPersistentPodVolumeClaim (not .Values.artifactory.customPersistentPodVolumeClaim.skipPrepareContainer) }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if or .Values.artifactory.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: copy-custom-certificates + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > +{{ include "artifactory-ha.copyCustomCerts" . | indent 10 }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath }} + - name: ca-certs + mountPath: "/tmp/certs" + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customInitContainers .Values.global.customInitContainers }} +{{ tpl (include "artifactory-ha.customInitContainers" .) . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ include "artifactory-ha.app.version" . 
}} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . 
| indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} +{{- end }} + {{- if .Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + {{- with .Values.artifactory.node.preStartCommand }} + echo "Running member node specific custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /entrypoint-artifactory.sh + {{- with .Values.artifactory.postStartCommand }} + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo "Running custom postStartCommand command"; + {{ tpl . $ }} + {{- end }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . 
}}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + name: http + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + name: http-internal + {{- if .Values.artifactory.node.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.node.javaOpts.jmx.port }} + name: tcp-jmx + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . 
| indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + {{- if .Values.artifactory.startupProbe.enabled }} + startupProbe: + httpGet: + path: {{ .Values.artifactory.startupProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.artifactory.startupProbe.failureThreshold }} + timeoutSeconds: {{ .Values.artifactory.startupProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . 
}}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if or .Values.artifactory.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "artifactory-ha.customSidecarContainers" .) . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.node.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.node.affinity }} + {{- with .Values.artifactory.node.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- end }} + {{- with .Values.artifactory.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if or .Values.artifactory.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + secret: + secretName: {{ default .Values.global.customCertificates.certificateSecretName .Values.artifactory.customCertificates.certificateSecretName }} + {{- end }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . 
}}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + secret: + secretName: {{ default (printf "%s-%s" (include "artifactory-ha.primary.name" .) "system-yaml") .Values.systemYamlOverride.existingSecret }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if or .Values.artifactory.customVolumes .Values.global.customVolumes }} +{{ tpl (include "artifactory-ha.customVolumes" .) . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.node.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-pdb.yaml new file mode 100644 index 000000000..cc4dfab65 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.artifactory.primary.minAvailable -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-primary + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.artifactory.name }} + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.primary.minAvailable }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-statefulset.yaml new file mode 100644 index 000000000..c86a8b1cb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-primary-statefulset.yaml @@ -0,0 +1,897 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + version: {{ include "artifactory-ha.app.version" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.primary.labels }} +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md) \nNote: This applies only when you are using bundled postgresql (postgresql.enabled=true) \nIf you are upgrading from a chart version (< 4.x) that has postgresql.image.tag of 9.x or 10.x, make sure to pass the current postgresql.image.tag and set databaseUpgradeReady=true \nOR \nIf you are upgrading from a chart version (>= 4.x), just set databaseUpgradeReady=true \n" .Values.databaseUpgradeReady | quote }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.primary.name" . }} + replicas: {{ .Values.artifactory.primary.replicaCount }} + updateStrategy: {{- toYaml .Values.artifactory.primary.updateStrategy | nindent 4}} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.primary.name" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/database-secrets: {{ include (print $.Template.BasePath "/artifactory-database-secrets.yaml") . | sha256sum }} + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.access.accessConfig }} + checksum/access-config: {{ include (print $.Template.BasePath "/artifactory-access-config.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} + checksum/admin-creds: {{ include (print $.Template.BasePath "/admin-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . 
| indent 6 }} + {{- end }} + {{- if .Values.artifactory.setSecurityContext }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.gid }} + {{- end }} + initContainers: + {{- if or .Values.artifactory.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "artifactory-ha.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}/lost+found; + rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + subPath: {{ .Values.artifactory.admin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + {{- if .Values.access.accessConfig }} + echo "Copy access.config.patch.yml to {{ .Values.artifactory.persistence.mountPath }}/etc/access"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -fv /tmp/etc/access.config.patch.yml {{ .Values.artifactory.persistence.mountPath }}/etc/access/access.config.patch.yml; + {{- end }} + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + touch {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/reset_ca_keys; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo 
"Copying custom certificates to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + cp -fv /tmp/etc/tls.crt {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.crt; + cp -fv /tmp/etc/tls.key {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.private.key; + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security; + echo -n ${ARTIFACTORY_JOIN_KEY} > {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security/join.key; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + {{- end }} + env: + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + - name: ARTIFACTORY_JOIN_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.joinKeySecretName" . }} + key: join-key + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.masterKeySecretName" . 
}} + key: master-key + {{- end }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else if .Values.artifactory.systemYaml }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + mountPath: "/tmp/etc/access.config.patch.yml" + subPath: access.config.patch.yml + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + mountPath: "/tmp/etc/tls.crt" + subPath: tls.crt + - name: access-certs + mountPath: "/tmp/etc/tls.key" + subPath: tls.key + {{- end }} + {{- if and .Values.artifactory.customPersistentPodVolumeClaim (not .Values.artifactory.customPersistentPodVolumeClaim.skipPrepareContainer) }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + capabilities: + add: + - CHOWN + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if or .Values.artifactory.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: copy-custom-certificates + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > +{{ include "artifactory-ha.copyCustomCerts" . | indent 10 }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath }} + - name: ca-certs + mountPath: "/tmp/certs" + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customInitContainers .Values.global.customInitContainers }} +{{ tpl (include "artifactory-ha.customInitContainers" .) . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . 
}}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ include "artifactory-ha.app.version" . }} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) 
$ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . | indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} +{{- end }} + {{- if .Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 6 }} + {{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + if [ -d /artifactory_extra_conf ] && [ -d /artifactory_bootstrap ]; then + echo "Copying bootstrap config from /artifactory_extra_conf to /artifactory_bootstrap"; + cp -Lrfv /artifactory_extra_conf/ /artifactory_bootstrap/; + fi; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_bootstrap/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /artifactory_bootstrap/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- with .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.artifactory.primary.preStartCommand }} + echo "Running primary specific custom preStartCommand command"; + {{ tpl . 
$ }}; + {{- end }} + exec /entrypoint-artifactory.sh + {{- with .Values.artifactory.postStartCommand }} + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo "Running custom postStartCommand command"; + {{ tpl . $ }}; + {{- end }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + name: http + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + name: http-internal + {{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.primary.javaOpts.jmx.port }} + name: tcp-jmx + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + mountPath: "/artifactory_bootstrap/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . 
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + {{- if .Values.artifactory.startupProbe.enabled }} + startupProbe: + httpGet: + path: {{ .Values.artifactory.startupProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.artifactory.startupProbe.failureThreshold }} + timeoutSeconds: {{ .Values.artifactory.startupProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . 
| upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if or .Values.artifactory.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "artifactory-ha.customSidecarContainers" .) . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.primary.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.primary.affinity }} + {{- with .Values.artifactory.primary.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.artifactory.primary.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if or .Values.artifactory.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + secret: + secretName: {{ default .Values.global.customCertificates.certificateSecretName .Values.artifactory.customCertificates.certificateSecretName }} + {{- end }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . 
}}-configmaps + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + secretName: {{ .Values.artifactory.admin.secret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + secret: + secretName: {{ default (printf "%s-%s" (include "artifactory-ha.primary.name" .) "system-yaml") .Values.systemYamlOverride.existingSecret }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + secret: + secretName: {{ template "artifactory-ha.fullname" . }}-access-config + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + secret: + secretName: {{ .Values.access.customCertificatesSecretName }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if or .Values.artifactory.customVolumes .Values.global.customVolumes }} +{{ tpl (include "artifactory-ha.customVolumes" .) . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.primary.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-priority-class.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-priority-class.yaml new file mode 100644 index 000000000..417ec5c06 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-role.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-role.yaml new file mode 100644 index 000000000..c86bffddd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-rolebinding.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-rolebinding.yaml new file mode 100644 index 000000000..4412870b1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory-ha.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory-ha.fullname" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-secrets.yaml new file mode 100644 index 000000000..5870428ca --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-secrets.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if or .Values.artifactory.masterKey .Values.global.masterKey }} + {{- if not (or .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName) }} + master-key: {{ include "artifactory-ha.masterKey" . | b64enc | quote }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey }} + {{- if not (or .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName) }} + join-key: {{ include "artifactory-ha.joinKey" . | b64enc | quote }} + {{- end }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-service.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-service.yaml new file mode 100644 index 000000000..baacb970f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-service.yaml @@ -0,0 +1,109 @@ +# Service for all Artifactory cluster nodes. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml .| indent 4 }} + {{- end }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + {{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: http-router + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: http-artifactory + {{- with .Values.artifactory.node.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: tcp-jmx + {{- end }} + {{- end }} + selector: +{{- if eq (int .Values.artifactory.node.replicaCount) 0 }} + role: {{ template "artifactory-ha.primary.name" . }} +{{- else if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} +{{- end }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} +--- +# Internal service for Artifactory primary node only! +# Used by member nodes to check readiness of primary node before starting up +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +spec: + # Statically setting service type to ClusterIP since this is an internal only service + type: ClusterIP + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: http-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: http-artifactory + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + {{- with .Values.artifactory.primary.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: tcp-jmx + {{- end }} + {{- end }} + selector: + role: {{ template "artifactory-ha.primary.name" . }} + app: {{ template "artifactory-ha.name" . 
}} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-serviceaccount.yaml new file mode 100644 index 000000000..6983c1d12 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.serviceAccountName" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-storage-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-storage-pvc.yaml new file mode 100644 index 000000000..e0bfa6b11 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-storage-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.artifactory.customPersistentVolumeClaim }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + labels: + app: {{ template "artifactory-ha.name" . }} + version: "{{ .Values.artifactory.version }}" + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + accessModes: + {{- range .Values.artifactory.customPersistentVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentVolumeClaim.size | quote }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-system-yaml.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-system-yaml.yaml new file mode 100644 index 000000000..aaa1be152 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/artifactory-system-yaml.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.systemYamlOverride.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.primary.name" . }}-system-yaml + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . 
| indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/filebeat-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..d2db2a067 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.name" . }}-filebeat-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/ingress.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/ingress.yaml new file mode 100644 index 000000000..53f7c93ec --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/ingress.yaml @@ -0,0 +1,149 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- $ingressName := default ( include "artifactory-ha.fullname" . ) .Values.ingress.name -}} +{{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $ingressName }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- if .Values.artifactory.replicator.enabled }} +--- +{{- $replicationIngressName := default ( include "artifactory-ha.replicator.fullname" . 
) .Values.artifactory.replicator.ingress.name -}} +{{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $replicationIngressName }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.ingress.annotations }} + annotations: +{{ .Values.artifactory.replicator.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.ingress.hosts }} + {{- range $host := .Values.artifactory.replicator.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: /replicator/ + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: /artifactory/api/replication/replicate/file/streaming + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.ingress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- if and .Values.artifactory.replicator.enabled .Values.artifactory.replicator.trackerIngress.enabled }} +--- +{{- $replicatorTrackerIngressName := default ( include "artifactory-ha.replicator.tracker.fullname" . ) .Values.artifactory.replicator.trackerIngress.name -}} + {{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 + {{- else }} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ $replicatorTrackerIngressName }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.trackerIngress.annotations }} + annotations: +{{ .Values.artifactory.replicator.trackerIngress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.trackerIngress.hosts }} + {{- range $host := .Values.artifactory.replicator.trackerIngress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: / + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.trackerIngress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.trackerIngress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- if .Values.customIngress }} +--- +{{ .Values.customIngress | toYaml | trimSuffix "\n" }} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/logger-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/logger-configmap.yaml new file mode 100644 index 000000000..87fe8999e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: {{ template "artifactory-ha.fullname" . }}-logger + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 1 + done + +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-artifactory-conf.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-artifactory-conf.yaml new file mode 100644 index 000000000..eb1f0e698 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-certificate-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-certificate-secret.yaml new file mode 100644 index 000000000..29c77ad5a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.enabled .Values.nginx.https.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory-ha.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-conf.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-conf.yaml new file mode 100644 index 000000000..5f424d52a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-deployment.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-deployment.yaml new file mode 100644 index 000000000..0bc7ae48c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-deployment.yaml @@ -0,0 +1,217 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: {{ .Values.nginx.kind }} +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: +{{- if ne .Values.nginx.kind "DaemonSet" }} + replicas: {{ .Values.nginx.replicaCount }} +{{- end }} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName | quote }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . 
"nginx") }} + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + name: http + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + name: http-internal + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + name: https + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + name: https-internal + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.nginx.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + {{- end }} + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.startupProbe.enabled }} + startupProbe: + httpGet: + path: {{ .Values.nginx.startupProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.startupProbe.periodSeconds }} + failureThreshold: {{ .Values.nginx.startupProbe.failureThreshold }} + timeoutSeconds: {{ .Values.nginx.startupProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." 
"-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + resources: +{{ toYaml $.Values.nginx.loggersResources | indent 10 }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + {{- end }} + + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory-ha.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pdb.yaml new file mode 100644 index 000000000..8310377a6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.nginx.name }} + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.nginx.minAvailable }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pvc.yaml new file mode 100644 index 000000000..0e573f383 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled) (eq (int .Values.nginx.replicaCount) 1) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClassName }} +{{- if (eq "-" .Values.nginx.persistence.storageClassName) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClassName }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-service.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-service.yaml new file mode 100644 index 000000000..594717cf9 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/templates/nginx-service.yaml @@ -0,0 +1,79 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + {{- if .Values.nginx.service.labels }} +{{ toYaml .Values.nginx.service.labels | indent 4 }} + {{- end }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} + {{- if and (eq .Values.nginx.service.type "ClusterIP") .Values.nginx.service.clusterIP }} + clusterIP: {{ .Values.nginx.service.clusterIP }} + {{- end }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later verion + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if or .Values.nginx.https.enabled .Values.nginx.service.ssloffload }} + - port: {{ .Values.nginx.https.externalPort }} + {{- if .Values.nginx.service.ssloffload }} + targetPort: {{ .Values.nginx.http.internalPort }} + {{- else }} + targetPort: {{ .Values.nginx.https.internalPort}} + {{- end }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.nginx.ssh.externalPort }} + targetPort: {{ .Values.nginx.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + selector: + app: {{ template "artifactory-ha.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/values-large.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/values-large.yaml new file mode 100644 index 000000000..ec05d2add --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/values-large.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" + node: + replicaCount: 3 + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/values-medium.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/values-medium.yaml new file mode 100644 index 000000000..33879c00b --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/values-medium.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" + node: + replicaCount: 2 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/values-small.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/values-small.yaml new file mode 100644 index 000000000..4babf97cb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/values-small.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" + node: + replicaCount: 1 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/charts/artifactory-ha/artifactory-ha/4.13.000/values.yaml b/charts/artifactory-ha/artifactory-ha/4.13.000/values.yaml new file mode 100644 index 000000000..f0b943cc4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.13.000/values.yaml @@ -0,0 +1,1710 @@ +# Default values for artifactory-ha. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! 
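# The values-small.yaml, values-medium.yaml and values-large.yaml presets above only override
# the primary/node resources, javaOpts and the node replicaCount; everything else falls back to
# the defaults in this file. A minimal sketch of layering a preset with extra overrides at
# install time (release name, chart path and my-overrides.yaml are placeholders; later -f files
# take precedence over earlier ones):
#   helm upgrade --install artifactory-ha ./artifactory-ha \
#     -f values-large.yaml -f my-overrides.yaml \
#     --set artifactory.masterKey=${MASTER_KEY} \
#     --set artifactory.joinKey=${JOIN_KEY}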
+# Access the values with {{ .Values.key.subkey }} + +global: + # imageRegistry: releases-docker.jfrog.io + # imagePullSecrets: + # - myRegistryKeySecretName + ## Chart.AppVersion can be overidden using global.versions.artifactory or .Values.artifactory.image.tag + ## Note: Order of preference is 1) global.versions 2) .Values.artifactory.image.tag 3) Chart.AppVersion + ## This applies also for nginx images (.Values.nginx.image.tag) + versions: {} + # artifactory: + # joinKey: + # masterKey: + # joinKeySecretName: + # masterKeySecretName: + # customInitContainersBegin: | + + # customInitContainers: | + + # customVolumes: | + + # customVolumeMounts: | + + # customSidecarContainers: | + + ## certificates added to this secret will be copied to $JFROG_HOME/artifactory/var/etc/security/keys/trusted directory + customCertificates: + enabled: false + # certificateSecretName: + +initContainerImage: releases-docker.jfrog.io/alpine:3.13.1 + +installer: + type: + platform: + +installerInfo: '{"productId": "Helm_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + +# For supporting pulling from private registries +# imagePullSecrets: +# - myRegistryKeySecretName + +## Artifactory systemYaml override +## This is for advanced usecases where users wants to provide their own systemYaml for configuring artifactory +## Refer: https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML +## Note: This will override existing (default) .Values.artifactory.systemYaml in values.yaml +## Alternatively, systemYaml can be overidden via customInitContainers using external sources like vaults, external repositories etc. Please refer customInitContainer section below for an example. +## Note: Order of preference is 1) customInitContainers 2) systemYamlOverride existingSecret 3) default systemYaml in values.yaml +systemYamlOverride: +## You can use a pre-existing secret by specifying existingSecret + existingSecret: +## The dataKey should be the name of the secret data key created. + dataKey: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. + hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. 
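  # For example (certificate paths and the namespace are placeholders), the TLS secret
  # referenced by the commented entry below could be created manually with:
  #   kubectl create secret tls chart-example-tls \
  #     --cert=path/to/tls.crt --key=path/to/tls.key \
  #     --namespace artifactory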
+ # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + +## Allows to add custom ingress +customIngress: | + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory-ha + + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the postgresql dependency +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: releases-docker.jfrog.io + repository: bitnami/postgresql + tag: 12.5.0-debian-10-r25 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlExtendedConf: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + master: + nodeSelector: {} + affinity: {} + tolerations: [] + slave: + nodeSelector: {} + affinity: {} + tolerations: [] + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## you MUST specify custom database details here or Artifactory will NOT start +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "rds-artifactory" + # key: "db-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +logger: + image: + registry: releases-docker.jfrog.io + repository: busybox + tag: 1.32.1 + +# Artifactory +artifactory: + name: artifactory-ha + # Note that by default we use appVersion to get image tag/version + image: + registry: releases-docker.jfrog.io + repository: jfrog/artifactory-pro + # tag: + pullPolicy: IfNotPresent + + # Create a priority class for the Artifactory pods or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 200 + extraConfig: 'acceptCount="100"' + + # certificates added to this secret will be copied to $JFROG_HOME/artifactory/var/etc/security/keys/trusted directory + customCertificates: + enabled: false + # certificateSecretName: + + # Support for open metrics is only available for Artifactory 7.7.x (appVersions) and above. 
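  # External database sketch (relates to the postgresql/database sections above): to use an
  # existing PostgreSQL instead of the bundled chart, postgresql.enabled must be false and the
  # database block filled in. The hostname and credentials below are placeholders; type and
  # driver mirror the systemYaml defaults further down in this file.
  #   postgresql:
  #     enabled: false
  #   database:
  #     type: postgresql
  #     driver: org.postgresql.Driver
  #     url: jdbc:postgresql://my-postgres.example.com:5432/artifactory
  #     user: artifactory
  #     password: my-db-password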
+ # To enable set `.Values.artifactory.openMetrics.enabled` to `true` + # Refer - https://www.jfrog.com/confluence/display/JFROG/Open+Metrics + openMetrics: + enabled: false + + # This directory is intended for use with NFS eventual configuration for HA + haDataDir: + enabled: false + path: + haBackupDir: + enabled: false + path: + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_bootstrap/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + # # Absolute path + # - source: /artifactory_bootstrap/artifactory.cluster.license + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - access-audit.log + # - access-request.log + # - access-security-audit.log + # - access-service.log + # - artifactory-access.log + # - artifactory-event.log + # - artifactory-import-export.log + # - artifactory-request.log + # - artifactory-service.log + # - frontend-request.log + # - frontend-service.log + # - metadata-request.log + # - metadata-service.log + # - router-request.log + # - router-service.log + # - router-traefik.log + # - derby.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - tomcat-catalina.log + # - tomcat-localhost.log + + # Tomcat (catalina) loggers resources + catalinaLoggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 + ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + ## Add custom init containers + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-systemyaml-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'wget -O {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml https:///systemyaml' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ 
.Values.artifactory.image.pullPolicy }}" + # securityContext: + # allowPrivilegeEscalation: false + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + # If skipPrepareContainer is set to true , this will skip the prepare-custom-persistent-volume init container + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + # skipPrepareContainer: false + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key! + ## You can generate one with the command: "openssl rand -hex 32" + ## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}' + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! + ## IMPORTANT: This is a mandatory for fresh Install of 7.x (App version) + # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + # masterKeySecretName: + + ## Join Key to connect to other services to Artifactory. + ## IMPORTANT: Setting this value overrides the existing joinKey + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + + # Add custom secrets - secret per file + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + + binarystore: + enabled: true + + ## admin allows to set the password for the default admin user. + ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate + admin: + ip: "127.0.0.1" + username: "admin" + password: + secret: + dataKey: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.cluster.license and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. 
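    ## For example (secret and key names below are illustrative), a cluster license file could
    ## be stored in a Secret and referenced from here:
    ##   kubectl create secret generic artifactory-license \
    ##     --from-file=artifactory.cluster.license=./artifactory.cluster.license
    ## and then:
    ##   secret: artifactory-license
    ##   dataKey: artifactory.cluster.license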
+ dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. + ## Uncomment and set value as needed + extraEnvironmentVariables: + # - name: SERVER_XML_ARTIFACTORY_PORT + # value: "8081" + # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS + # value: "200" + # - name: SERVER_XML_ACCESS_MAX_THREADS + # value: "50" + # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_ACCESS_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_EXTRA_CONNECTOR + # value: "" + # - name: DB_POOL_MAX_ACTIVE + # value: "100" + # - name: DB_POOL_MAX_IDLE + # value: "10" + # - name: MY_SECRET_ENV_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret-name + # key: my-secret-key + + # TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes) + systemYaml: | + shared: + logging: + consoleLog: + enabled: {{ .Values.artifactory.consoleLog }} + extraJavaOpts: > + -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }} + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}" + host: "" + driver: org.postgresql.Driver + username: "{{ .Values.postgresql.postgresqlUsername }}" + {{ else }} + type: "{{ .Values.database.type }}" + driver: "{{ .Values.database.driver }}" + {{- end }} + artifactory: + {{- if .Values.artifactory.openMetrics }} + 
metrics: + enabled: {{ .Values.artifactory.openMetrics.enabled }} + {{- end }} + {{- if or .Values.artifactory.haDataDir.enabled .Values.artifactory.haBackupDir.enabled }} + node: + {{- if .Values.artifactory.haDataDir.path }} + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + {{- if .Values.artifactory.haBackupDir.path }} + haBackupDir: {{ .Values.artifactory.haBackupDir.path }} + {{- end }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }} + frontend: + session: + timeMinutes: {{ .Values.frontend.session.timeoutMinutes | quote }} + access: + database: + maxOpenConnections: {{ .Values.access.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.access.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.access.tomcat.connector.extraConfig }} + {{- if .Values.access.database.enabled }} + type: "{{ .Values.access.database.type }}" + url: "{{ .Values.access.database.url }}" + driver: "{{ .Values.access.database.driver }}" + username: "{{ .Values.access.database.user }}" + password: "{{ .Values.access.database.password }}" + {{- end }} + metadata: + database: + maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }} + {{- if .Values.artifactory.replicator.enabled }} + replicator: + enabled: true + {{- end }} + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + gid: 1030 + terminationGracePeriodSeconds: 30 + + ## By default, the Artifactory StatefulSet is created with a securityContext that sets the `runAsUser` and the `fsGroup` to the `artifactory.uid` value. + ## If you want to disable the securityContext for the Artifactory StatefulSet, set this tag to false + setSecurityContext: true + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 0 + failureThreshold: 10 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 0 + failureThreshold: 10 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + startupProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 30 + failureThreshold: 60 + periodSeconds: 5 + timeoutSeconds: 5 + + persistence: + enabled: true + local: false + redundancy: 3 + mountPath: "/var/opt/jfrog/artifactory" + accessMode: ReadWriteOnce + size: 200Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + + maxCacheSize: 50000000000 + cacheProviderDir: cache + eventual: + numberOfThreads: 10 + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## aws-s3-v3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. + binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" }} + + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + + + + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + + {{- end }} + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + // Specify the read and write strategy and redundancy for the sharding binary provider + + roundRobin + percentageFreeSpace + 2 + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + //For each sub-provider (mount), specify the filestore location + + filestore{{ $sharedClaimNumber }} + + {{- end }} + + {{- else }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + 2 + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + shard-fs-1 + local + + + + + 30 + tester-remote1 + 10000 + remote + + + + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + + + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + + {{- else }} + + {{- end }} + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + + false + {{- else }} + + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{- end }} + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .maxConnections }} + {{ . }} + {{- end }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . 
}} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + {{- if .useInstanceCredentials }} + true + {{- else }} + false + {{- end }} + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . }} + {{- end }} + {{- with .enableSignedUrlRedirect }} + {{ . }} + {{- end }} + {{- with .enablePathStyleAccess }} + {{ . }} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + crossNetworkStrategy + crossNetworkStrategy + 2 + 1 + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.multiPartLimit }} + {{ .Values.artifactory.persistence.azureBlob.multipartElementSize }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type file-system + fileSystem: + ## You may also use existing shared claims for the data and backup storage. This allows storage (NAS for example) to be used for Data and Backup dirs which are safe to share across multiple artifactory nodes. + ## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1) + ## Create PVCs with ReadWriteMany that match the naming convetions: + ## {{ template "artifactory-ha.fullname" . }}-data-pvc- + ## {{ template "artifactory-ha.fullname" . 
}}-backup-pvc + ## Example (using numberOfExistingClaims: 2) + ## myexample-data-pvc-0 + ## myexample-data-pvc-1 + ## myexample-backup-pvc + ## Note: While you need two PVC fronting two PVs, multiple PVs can be attached to the same storage in many cases allowing you to share an underlying drive. + + ## Need to have the following set + existingSharedClaim: + enabled: false + numberOfExistingClaims: 1 + ## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }} + dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data" + backupDir: "/var/opt/jfrog/artifactory-backup" + + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. + ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory-ha" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + mountOptions: [] + ## For artifactory.persistence.type google-storage + googleStorage: + ## When using GCP buckets as your binary store (Available with enterprise license only) + gcpServiceAccount: + enabled: false + ## Use either an existing secret prepared in advance or put the config (replace the content) in the values + ## ref: https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#google-storage + # customSecretName: + # config: | + # { + # "type": "service_account", + # "project_id": "", + # "private_key_id": "?????", + # "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + # "client_email": "???@j.iam.gserviceaccount.com", + # "client_id": "???????", + # "auth_uri": "https://accounts.google.com/o/oauth2/auth", + # "token_uri": "https://oauth2.googleapis.com/token", + # "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + # "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." + # } + endpoint: commondatastorage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-ha-gcp" + identity: + credential: + path: "artifactory-ha/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + maxConnections: 50 + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + enableSignedUrlRedirect: false + enablePathStyleAccess: false + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! 
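  ## Illustrative region/endpoint pairing (example values only, not chart defaults; verify against the AWS endpoint reference linked below):
  ##   region: us-east-1  ->  endpoint: s3.amazonaws.com
  ##   region: eu-west-1  ->  endpoint: s3.eu-west-1.amazonaws.com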
See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-ha-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory-ha/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: "AWS4-HMAC-SHA256" + + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + multiPartLimit: 100000000 + multipartElementSize: 50000000 + testConnection: false + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them) + ## Supported pool values + ## members + ## all + pool: members + + ## The following Java options are passed to the java process running Artifactory. + ## This will be passed to all cluster members. Primary and member nodes. + javaOpts: {} + # other: "" + + ## The following setting are to configure a dedicated Ingress object for Replicator service + replicator: + enabled: false + ingress: + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + ## When replicator is enabled and want to use tracker feature, trackerIngress.enabled flag should be set to true + ## Please refer - https://www.jfrog.com/confluence/display/JFROG/JFrog+Peer-to-Peer+%28P2P%29+Downloads + trackerIngress: + enabled: false + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + + ssh: + enabled: false + internalPort: 1339 + externalPort: 1339 + + annotations: {} + + ## Type specific configurations. + ## There is a difference between the primary and the member nodes. + ## Customising their resources and java parameters is done here. + primary: + name: artifactory-ha-primary + # preStartCommand specific to the primary node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0` + existingClaim: false + + replicaCount: 1 + # minAvailable: 1 + + updateStrategy: + type: RollingUpdate + + ## Resources for the primary node + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory primary node. 
+ ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + nodeSelector: {} + + tolerations: [] + + affinity: {} + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + + node: + name: artifactory-ha-member + # preStartCommand specific to the member node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0` + existingClaim: false + replicaCount: 2 + updateStrategy: + type: RollingUpdate + minAvailable: 1 + ## Resources for the member nodes + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory member nodes. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + # xms: "1g" + # xmx: "2g" + # other: "" + nodeSelector: {} + + ## Wait for Artifactory primary + waitForPrimaryStartup: + enabled: true + + ## Setting time will override the built in test and will just wait the set time + time: + + tolerations: [] + + ## Complete specification of the "affinity" of the member nodes; if this is non-empty, + ## "podAntiAffinity" values are not used. + affinity: {} + + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + +frontend: + ## Session settings + session: + ## Time in minutes after which the frontend token will need to be refreshed + timeoutMinutes: '30' + +access: + ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. + ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates + ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes. + ## This ensures that the node to node communication is done over TLS. 
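  ## Illustrative override (a sketch only, built from the keys defined just below): to enable Access TLS,
  ## set the following in a custom values file:
  ##   access:
  ##     accessConfig:
  ##       security:
  ##         tls: true
  ## or pass it from the command line:
  ##   helm upgrade --install ... --set access.accessConfig.security.tls=true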
+ accessConfig: + security: + tls: false + + ## You can use a pre-existing secret by specifying customCertificatesSecretName + ## Example : Create a tls secret using `kubectl create secret tls --cert=ca.crt --key=ca.private.key` + # customCertificatesSecretName: + + ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key + # resetAccessCAKeys: false + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 50 + extraConfig: 'acceptCount="100"' + +metadata: + database: + maxOpenConnections: 80 + +# Init containers +initContainers: + resources: {} +# requests: +# memory: "64Mi" +# cpu: "10m" +# limits: +# memory: "128Mi" +# cpu: "250m" + + +# Nginx +nginx: + enabled: true + kind: Deployment + name: nginx + labels: {} + replicaCount: 1 + minAvailable: 0 + uid: 104 + gid: 107 + # Note that by default we use appVersion to get image tag/version + image: + registry: releases-docker.jfrog.io + repository: jfrog/nginx-artifactory-pro + # tag: + pullPolicy: IfNotPresent + + # Priority Class name to be used in deployment if provided + priorityClassName: + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + + # Logs options + logs: + stderr: false + level: warn + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + + {{ if .Values.nginx.logs.stderr }} + error_log stderr {{ .Values.nginx.logs.level }}; + {{- else -}} + error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }}; + {{- end }} + pid /tmp/nginx.pid; + + {{- if .Values.artifactory.ssh.enabled }} + ## SSH Server Configuration + stream { + server { + listen {{ .Values.nginx.ssh.internalPort }}; + proxy_pass {{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.ssh.externalPort }}; + } + } + {{- end }} + + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 128k; + proxy_buffers 40 128k; + proxy_busy_buffers_size 128k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + + artifactoryConf: | + {{- if .Values.nginx.https.enabled }} + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + {{- end }} + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?.+)\.{{ . }} + {{- end -}} + {{- end -}}; + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.externalPort }}/; + {{- if .Values.nginx.service.ssloffload}} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host; + {{- else }} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + {{- end }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + add_header Strict-Transport-Security always; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + ssloffload: false + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. + externalTrafficPolicy: Cluster + labels: {} + # label-key: label-value + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + # DEPRECATED: The following will be replaced by L1065-L1076 in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ssh: + internalPort: 1339 + externalPort: 1339 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 0 + failureThreshold: 10 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 0 + failureThreshold: 10 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + startupProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 30 + failureThreshold: 60 + periodSeconds: 5 + timeoutSeconds: 5 + + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. It assumes you have a logstash installed and configured properly. +filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.9.2 + logstashUrl: "logstash:5044" + + terminationGracePeriod: 10 + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] + +## Allows to add additional kubernetes resources +## Use --- as a separator between multiple resources +## For an example, refer - https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-ha-values.yaml +additionalResources: | + +# Adding entries to a Pod's /etc/hosts file +# For an example, refer - https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/.helmignore b/charts/artifactory-ha/artifactory-ha/4.7.600/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/CHANGELOG.md b/charts/artifactory-ha/artifactory-ha/4.7.600/CHANGELOG.md new file mode 100644 index 000000000..d39066f5a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/CHANGELOG.md @@ -0,0 +1,989 @@ +# JFrog Artifactory-ha Chart Changelog +All changes to this chart will be documented in this file + +## [4.7.6] - Jan 11, 2021 +* Updated Artifactory version to 7.12.6 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.12.6) + +## [4.7.5] - Jan 07, 2021 +* Added support for optional tracker dedicated ingress `.Values.artifactory.replicator.trackerIngress.enabled` (defaults to false) + +## [4.7.4] - Jan 04, 2021 +* Fixed gid support for statefulset + +## [4.7.3] - Dec 31, 2020 +* Added gid support for statefulset +* Add setSecurityContext flag to allow securityContext block to be removed from artifactory statefulset + +## [4.7.2] - Dec 29, 2020 +* **Important:** Removed `.Values.metrics` and `.Values.fluentd` (Fluentd and Prometheus integrations) +* Add support for creating additional kubernetes resources - [refer here](https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-ha-values.yaml) +* Updated Artifactory version to 7.12.5 + +## [4.7.1] - Dec 21, 2020 +* Updated Artifactory version to 7.12.3 + +## [4.7.0] - Dec 18, 2020 +* Updated Artifactory version to 7.12.2 +* Added `.Values.artifactory.openMetrics.enabled` + +## [4.6.1] - Dec 11, 2020 +* Added configurable `.Values.global.versions.artifactory` in values.yaml + +## [4.6.0] - Dec 10, 2020 +* Update postgresql tag version to `12.5.0-debian-10-r25` +* Fixed `artifactory.persistence.googleStorage.endpoint` from `storage.googleapis.com` to `commondatastorage.googleapis.com` +* Updated chart maintainers email + +## [4.5.5] - Dec 4, 2020 +* **Important:** Renamed `.Values.systemYaml` to `.Values.systemYamlOverride` + +## [4.5.4] - Dec 1, 2020 +* Improve error message returned when attempting helm upgrade command + +## [4.5.3] - Nov 30, 2020 +* Updated Artifactory version to 7.11.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) + +## [4.5.2] - Nov 23, 2020 +* Updated Artifactory version to 7.11.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) +* Updated port namings on services and pods to allow for istio protocol discovery +* Change semverCompare checks to support hosted Kubernetes +* Add flag to disable creation of ServiceMonitor when enabling prometheus metrics +* Prevent the PostHook command from being executed if the user did not specify a command in the values file +* Fix issue with tls file generation when nginx.https.enabled is false + +## [4.5.1] - Nov 19, 2020 +* Updated Artifactory version to 7.11.2 +* Bugfix - access.config.import.xml override Access Federation configurations + +## [4.5.0] - Nov 17, 2020 +* Updated Artifactory version to 7.11.1 +* Update alpine tag version to `3.12.1` + +## [4.4.6] - Nov 10, 2020 +* Pass system.yaml via external secret for advanced use cases +* Added support for custom ingress +* Bugfix - stateful set not picking up changes to database secrets + +## 
[4.4.5] - Nov 9, 2020 +* Updated Artifactory version to 7.10.6 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.6) + +## [4.4.4] - Nov 2, 2020 +* Add enablePathStyleAccess property for aws-s3-v3 binary provider template + +## [4.4.3] - Nov 2, 2020 +* Updated Artifactory version to 7.10.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.5) + +## [4.4.2] - Oct 22, 2020 +* Chown bug fix where Linux capability cannot chown all files causing log line warnings +* Fix Frontend timeout linting issue + +## [4.4.1] - Oct 20, 2020 +* Add flag to disable prepare-custom-persistent-volume init container + +## [4.4.0] - Oct 19, 2020 +* Updated Artifactory version to 7.10.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.2) + +## [4.3.4] - Oct 19, 2020 +* Add support to specify priorityClassName for nginx deployment + +## [4.3.3] - Oct 15, 2020 +* Fixed issue with node PodDisruptionBudget which was also getting applied on the primary +* Fix mandatory masterKey check issue when upgrading from 6.x to 7.x + +## [4.3.2] - Oct 14, 2020 +* Add support to allow more than 1 Primary in Artifactory-ha STS + +## [4.3.1] - Oct 9, 2020 +* Add global support for customInitContainersBegin + +## [4.3.0] - Oct 07, 2020 +* Updated Artifactory version to 7.9.1 +* **Breaking change:** Fix `storageClass` to correct `storageClassName` in values.yaml + +## [4.2.0] - Oct 5, 2020 +* Expose Prometheus metrics via a ServiceMonitor +* Parse log files for metric data with Fluentd + +## [4.1.0] - Sep 30, 2020 +* Updated Artifactory version to 7.9.0 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.9) + +## [4.0.12] - Sep 25, 2020 +* Update to use linux capability CAP_CHOWN instead of root base init container to avoid any use of root containers to pass Redhat security requirements + +## [4.0.11] - Sep 28, 2020 +* Setting chart coordinates in mitigation yaml + +## [4.0.10] - Sep 25, 2020 +* Update filebeat version to `7.9.2` + +## [4.0.9] - Sep 24, 2020 +* Fixed issue - when setting `waitForDatabase:false`, container startup still waits for DB + +## [4.0.8] - Sep 22, 2020 +* Updated readme + +## [4.0.7] - Sep 22, 2020 +* Fix lint issue in mitigation yaml + +## [4.0.6] - Sep 22, 2020 +* Fix broken mitigation yaml + +## [4.0.5] - Sep 21, 2020 +* Added mitigation yaml for Artifactory - [More info](https://github.com/jfrog/chartcenter/blob/master/docs/securitymitigationspec.md) + +## [4.0.4] - Sep 17, 2020 +* Added configurable session (UI) timeout in frontend microservice + +## [4.0.3] - Sep 17, 2020 +* Fix small typo in README and added proper required text to be shown during postgres upgrades + +## [4.0.2] - Sep 14, 2020 +* Updated Artifactory version to 7.7.8 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7.8) + +## [4.0.1] - Sep 8, 2020 +* Added support for artifactory pro license (single node) installation. 
+ +## [4.0.0] - Sep 2, 2020 +* **Breaking change:** Changed `imagePullSecrets` value from string to list +* **Breaking change:** Added `image.registry` and changed `image.version` to `image.tag` for docker images +* Added support for global values +* Updated maintainers in chart.yaml +* Update postgresql tag version to `12.3.0-debian-10-r71` +* Update postgresqlsub chart version to `9.3.4` - [9.x Upgrade Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#900) +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass previous 9.x/10.x's postgresql.image.tag and databaseUpgradeReady=true. + +## [3.1.0] - Aug 13, 2020 +* Updated Artifactory version to 7.7.3 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7) + +## [3.0.15] - Aug 10, 2020 +* Added enableSignedUrlRedirect for persistent storage type aws-s3-v3. + +## [3.0.14] - Jul 31, 2020 +* Update the README section on Nginx SSL termination to reflect the actual YAML structure. + +## [3.0.13] - Jul 30, 2020 +* Added condition to disable the migration scripts. + +## [3.0.12] - Jul 29, 2020 +* Document Artifactory node affinity. + +## [3.0.11] - Jul 28, 2020 +* Added maxConnections for persistent storage type aws-s3-v3. + +## [3.0.10] - Jul 28, 2020 +Bugfix / support for userPluginSecrets with Artifactory 7 + +## [3.0.9] - Jul 27, 2020 +* Add tpl to external database secrets. +* Modified `scheme` to `artifactory-ha.scheme` + +## [3.0.8] - Jul 23, 2020 +* Added condition to disable the migration init container. + +## [3.0.7] - Jul 21, 2020 +* Updated Artifactory-ha Chart to add node and primary labels to pods and service objects. + +## [3.0.6] - Jul 20, 2020 +* Support custom CA and certificates + +## [3.0.5] - Jul 13, 2020 +* Updated Artifactory version to 7.6.3 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.3 +* Fixed Mysql database jar path in `preStartCommand` in README + +## [3.0.4] - Jul 8, 2020 +* Move some postgresql values to where they should be according to the subchart + +## [3.0.3] - Jul 8, 2020 +* Set Artifactory access client connections to the same value as the access threads. + +## [3.0.2] - Jul 6, 2020 +* Updated Artifactory version to 7.6.2 +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [3.0.1] - Jul 01, 2020 +* Add dedicated ingress object for Replicator service when enabled + +## [3.0.0] - Jun 30, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update busybox tag version to `1.31.1` +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! 
+* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [2.6.0] - Jun 29, 2020 +* Updated Artifactory version to 7.6.1 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.1 +* Add tpl for external database secrets + +## [2.5.8] - Jun 25, 2020 +* Stop loading the Nginx stream module because it is now a core module + +## [2.5.7] - Jun 18, 2020 +* Fixes bootstrap configMap issue on member node + +## [2.5.6] - Jun 11, 2020 +* Support list of custom secrets + +## [2.5.5] - Jun 11, 2020 +* NOTES.txt fixed incorrect information + +## [2.5.4] - Jun 12, 2020 +* Updated Artifactory version to 7.5.7 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5.7 + +## [2.5.3] - Jun 8, 2020 +* Statically setting primary service type to ClusterIP. +* Prevents primary service from being exposed publicly when using LoadBalancer type on cloud providers. + +## [2.5.2] - Jun 8, 2020 +* Readme update - configuring Artifactory with oracledb + +## [2.5.1] - Jun 5, 2020 +* Fixes broken PDB issue upgrading from 6.x to 7.x + +## [2.5.0] - Jun 1, 2020 +* Updated Artifactory version to 7.5.5 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5 +* Fixes bootstrap configMap permission issue +* Update postgresql tag version to `9.6.18-debian-10-r7` + +## [2.4.10] - May 27, 2020 +* Added Tomcat maxThreads & acceptCount + +## [2.4.9] - May 25, 2020 +* Fixed postgresql README `image` Parameters + +## [2.4.8] - May 24, 2020 +* Fixed typo in README regarding migration timeout + +## [2.4.7] - May 19, 2020 +* Added metadata maxOpenConnections + +## [2.4.6] - May 07, 2020 +* Fix `installerInfo` string format + +## [2.4.5] - Apr 27, 2020 +* Updated Artifactory version to 7.4.3 + +## [2.4.4] - Apr 27, 2020 +* Change customInitContainers order to run before the "migration-ha-artifactory" initContainer + +## [2.4.3] - Apr 24, 2020 +* Fix `artifactory.persistence.awsS3V3.useInstanceCredentials` incorrect conditional logic +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [2.4.2] - Apr 16, 2020 +* Custom volume mounts in migration init container. 
+ +## [2.4.1] - Apr 16, 2020 +* Fix broken support for gcpServiceAccount for googleStorage + +## [2.4.0] - Apr 14, 2020 +* Updated Artifactory version to 7.4.1 + +## [2.3.1] - April 13, 2020 +* Update README with helm v3 commands + +## [2.3.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml +* Bump postgresql tag version to `9.6.17-debian-10-r21` in values.yaml + +## [2.2.11] - Apr 8, 2020 +* Added recommended ingress annotation to avoid 413 errors + +## [2.2.10] - Apr 8, 2020 +* Moved migration scripts under `files` directory +* Support preStartCommand in migration Init container as `artifactory.migration.preStartCommand` + +## [2.2.9] - Apr 01, 2020 +* Support masterKey and joinKey as secrets + +## [2.2.8] - Apr 01, 2020 +* Ensure that the join key is also copied when provided by an external secret +* Migration container in primary and node statefulset now respects custom versions and the specified node/primary resources + +## [2.2.7] - Apr 01, 2020 +* Added cache-layer in chain definition of Google Cloud Storage template +* Fix readme use to `-hex 32` instead of `-hex 16` + +## [2.2.6] - Mar 31, 2020 +* Change the way the artifactory `command:` is set so it will properly pass a SIGTERM to java + +## [2.2.5] - Mar 31, 2020 +* Removed duplicate `artifactory-license` volume from primary node + +## [2.2.4] - Mar 31, 2020 +* Restore `artifactory-license` volume for the primary node + +## [2.2.3] - Mar 29, 2020 +* Add Nginx log options: stderr as logfile and log level + +## [2.2.2] - Mar 30, 2020 +* Apply initContainers.resources to `copy-system-yaml`, `prepare-custom-persistent-volume`, and `migration-artifactory-ha` containers +* Use the same defaulting mechanism used for the artifactory version used elsewhere in the chart +* Removed duplicate `artifactory-license` volume that prevented using an external secret + +## [2.2.1] - Mar 29, 2020 +* Fix loggers sidecars configurations to support new file system layout and new log names + +## [2.2.0] - Mar 29, 2020 +* Fix broken admin user bootstrap configuration +* **Breaking change:** renamed `artifactory.accessAdmin` to `artifactory.admin` + +## [2.1.3] - Mar 24, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [2.1.2] - Mar 21, 2020 +* Support for SSL offload in Nginx service(LoadBalancer) layer. Introduced `nginx.service.ssloffload` field with boolean type. + +## [2.1.1] - Mar 23, 2020 +* Moved installer info to values.yaml so it is fully customizable + +## [2.1.0] - Mar 23, 2020 +* Updated Artifactory version to 7.3.2 + +## [2.0.36] - Mar 20, 2020 +* Add support GCP credentials.json authentication + +## [2.0.35] - Mar 20, 2020 +* Add support for masterKey trim during 6.x to 7.x migration if 6.x masterKey is 32 hex (64 characters) + +## [2.0.34] - Mar 19, 2020 +* Add support for NFS directories `haBackupDir` and `haDataDir` + +## [2.0.33] - Mar 18, 2020 +* Increased Nginx proxy_buffers size + +## [2.0.32] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files +* useInstanceCredentials variable was declared in S3 settings but not used in chart. Now it is being used. 
+ +## [2.0.31] - Mar 17, 2020 +* Fix rendering of Service Account annotations + +## [2.0.30] - Mar 16, 2020 +* Add Unsupported message from 6.18 to 7.2.x (migration) + +## [2.0.29] - Mar 11, 2020 +* Upgrade Docs update + +## [2.0.28] - Mar 11, 2020 +* Unified charts public release + +## [2.0.27] - Mar 8, 2020 +* Add an optional wait for primary node to be ready with a proper test for http status + +## [2.0.23] - Mar 6, 2020 +* Fix path to `/artifactory_bootstrap` +* Add support for controlling the name of the ingress and allow to set more than one cname + +## [2.0.22] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [2.0.21] - Feb 28, 2020 +* Add support to process `valueFrom` for extraEnvironmentVariables + +## [2.0.20] - Feb 26, 2020 +* Store join key to secret + +## [2.0.19] - Feb 26, 2020 +* Updated Artifactory version to 7.2.1 + +## [2.0.12] - Feb 07, 2020 +* Remove protection flag `databaseUpgradeReady` which was added to check internal postgres upgrade + +## [2.0.0] - Feb 07, 2020 +* Updated Artifactory version to 7.0.0 + +## [1.4.10] - Feb 13, 2020 +* Add support for SSH authentication to Artifactory + +## [1.4.9] - Feb 10, 2020 +* Fix custom DB password indention + +## [1.4.8] - Feb 9, 2020 +* Add support for `tpl` in the `postStartCommand` + +## [1.4.7] - Feb 4, 2020 +* Support customisable Nginx kind + +## [1.4.6] - Feb 2, 2020 +* Add a comment stating that it is recommended to use an external PostgreSQL with a static password for production installations + +## [1.4.5] - Feb 2, 2020 +* Add support for primary or member node specific preStartCommand + +## [1.4.4] - Jan 30, 2020 +* Add the option to configure resources for the logger containers + +## [1.4.3] - Jan 26, 2020 +* Improve `database.user` and `database.password` logic in order to support more use cases and make the configuration less repetitive + +## [1.4.2] - Jan 22, 2020 +* Refined pod disruption budgets to separate nginx and Artifactory pods + +## [1.4.1] - Jan 19, 2020 +* Fix replicator port config in nginx replicator configmap + +## [1.4.0] - Jan 19, 2020 +* Updated Artifactory version to 6.17.0 + +## [1.3.8] - Jan 16, 2020 +* Added example for external nginx-ingress + +## [1.3.7] - Jan 07, 2020 +* Add support for customizable `mountOptions` of NFS PVs + +## [1.3.6] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [1.3.5] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [1.3.4] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.3] - Dec 16, 2019 +* Another fix for toggling nginx service ports + +## [1.3.2] - Dec 12, 2019 +* Fix for toggling nginx service ports + +## [1.3.1] - Dec 10, 2019 +* Add support for toggling nginx service ports + +## [1.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [1.2.4] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [1.2.3] - Nov 27, 2019 +* Add support for PriorityClass + +## [1.2.2] - Nov 20, 2019 +* Update Artifactory logo + +## [1.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [1.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [1.1.12] - Nov 17, 2019 +* Fix `README.md` format (broken table) + +## [1.1.11] - Nov 17, 2019 +* Update comment on Artifactory master key + +## [1.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## 
[1.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [1.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [1.1.7] - Nov 11, 2019 +* Additional documentation for masterKey + +## [1.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [1.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [1.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [1.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [1.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [1.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [1.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [1.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [1.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [0.17.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [0.17.2] - Oct 21, 2019 +* Add support for setting `artifactory.primary.labels` +* Add support for setting `artifactory.node.labels` +* Add support for setting `nginx.labels` + +## [0.17.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [0.17.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [0.16.7] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [0.16.6] - Sep 24, 2019 +* Add support for setting `nginx.service.labels` + +## [0.16.5] - Sep 23, 2019 +* Add support for setting `artifactory.customInitContainersBegin` + +## [0.16.4] - Sep 20, 2019 +* Add support for setting `initContainers.resources` + +## [0.16.3] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [0.16.2] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [0.16.1] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [0.16.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [0.15.15] - Aug 18, 2019 +* Fix existingSharedClaim permissions issue and example + +## [0.15.14] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [0.15.13] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [0.15.12] - Aug 6, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [0.15.11] - Aug 5, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [0.15.10] - Aug 5, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [0.15.9] - Aug 1, 2019 +* Fix masterkey/masterKeySecretName not specified warning render logic in NOTES.txt + +## [0.15.8] - Jul 28, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [0.15.7] - Jul 25, 2019 +* Updated README about how to apply Artifactory licenses + +## [0.15.6] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [0.15.5] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [0.15.4] - Jul 11, 2019 +* Add `artifactory.customVolumeMounts` support to member node statefulset template + +## [0.15.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [0.15.2] - Jul 3, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [0.15.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [0.15.0] - Jun 27, 2019 +* Updated Artifactory version to 6.11.0 and Restart Primary node when bootstrap.creds file has been modified in artifactory-ha + +## [0.14.4] - Jun 24, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [0.14.3] - Jun 24, 2019 +* Update chart maintainers + +## [0.14.2] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [0.14.1] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [0.14.0] - Jun 20, 2019 +* Use ConfigMaps for nginx configuration and remove nginx postStart command + +## [0.13.10] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [0.13.9] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [0.13.8] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [0.13.7] - Jun 6, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [0.13.6] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [0.13.5] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [0.13.4] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [0.13.3] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [0.13.2] - May 19, 2019 +* Fix missing logger image tag + +## [0.13.1] - May 15, 2019 +* Support `artifactory.persistence.cacheProviderDir` for on-premise cluster + +## [0.13.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [0.12.23] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [0.12.22] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [0.12.21] - Apr 30, 2019 +* Add support for JMX monitoring + +## [0.12.20] - Apr 29, 2019 +* Added support for headless services + +## [0.12.19] - Apr 28, 2019 +* Added support for `cacheProviderDir` + +## [0.12.18] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [0.12.17] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [0.12.16] - Apr 12, 2019 +* Added support for `customVolumeMounts` + +## [0.12.15] - Apr 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [0.12.14] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [0.12.13] - Apr 07, 2019 +* Add support for providing the Artifactory license as a 
parameter + +## [0.12.12] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [0.12.11] - Apr 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [0.12.10] - Apr 07, 2019 +* Change network policy API group + +## [0.12.9] - Apr 04, 2019 +* Apply the existing PVC for members (in addition to primary) + +## [0.12.8] - Apr 03, 2019 +* Bugfix for userPluginSecrets + +## [0.12.7] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [0.12.6] - Apr 03, 2019 +* Added installer info + +## [0.12.5] - Apr 03, 2019 +* Allow secret names for user plugins to contain template language + +## [0.12.4] - Apr 02, 2019 +* Fix issue #253 (use existing PVC for data and backup storage) + +## [0.12.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [0.12.2] - Apr 01, 2019 +* Add support for user plugin secret + +## [0.12.1] - Mar 26, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [0.12.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [0.11.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [0.11.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [0.11.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [0.11.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [0.11.14] - Mar 21, 2019 +* Make ingress path configurable + +## [0.11.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart for Primary + +## [0.11.12] - Mar 19, 2019 +* Fix existingClaim example + +## [0.11.11] - Mar 18, 2019 +* Disable the option to use nginx PVC with more than one replica + +## [0.11.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [0.11.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [0.11.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [0.11.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 + +## [0.11.6] - Mar 13, 2019 +* Move securityContext to container level + +## [0.11.5] - Mar 11, 2019 +* Add the option to use existing volume claims for Artifactory storage + +## [0.11.4] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [0.11.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [0.11.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [0.11.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [0.11.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [0.10.3] - Feb 21, 2019 +* Add s3AwsVersion option to awsS3 configuration for use with IAM roles + +## [0.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [0.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [0.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [0.9.7] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [0.9.6] - Feb 7, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [0.9.5] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.4] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [0.9.3] - Feb 5, 2019 +* Remove the inactive server 
remove plugin + +## [0.9.2] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [0.9.1] - Jan 27, 2019 +* Fix support for Azure Blob Storage Binary provider + +## [0.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [0.8.10] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [0.8.9] - Jan 18, 2019 +* Added support of values ingress.labels + +## [0.8.8] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [0.8.7] - Jan 15, 2018 +* Add support for Azure Blob Storage Binary provider + +## [0.8.6] - Jan 13, 2019 +* Fix documentation about nginx group id + +## [0.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [0.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [0.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [0.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [0.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [0.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [0.7.17] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [0.7.16] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [0.7.15] - Dec 9, 2018 +* AWS S3 add `roleName` for using IAM role + +## [0.7.14] - Dec 6, 2018 +* AWS S3 `identity` and `credential` are now added only if have a value to allow using IAM role + +## [0.7.13] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [0.7.12] - Dec 2, 2018 +* Remove Java option "-Dartifactory.locking.provider.type=db". This is already the default setting. 
+ +## [0.7.11] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [0.7.10] - Nov 29, 2018 +* Fixed the volumeMount for the replicator.yaml + +## [0.7.9] - Nov 29, 2018 +* Optionally include primary node into poddisruptionbudget + +## [0.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [0.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [0.7.6] - Nov 18, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [0.7.5] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [0.7.4] - Nov 13, 2018 +* Allow pod anti-affinity settings to include primary node + +## [0.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [0.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [0.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [0.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.9] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [0.6.8] - Oct 22, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [0.6.7] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [0.6.6] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [0.6.5] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow `soft` or `hard` specification of member node anti-affinity +* Allow providing pre-existing secrets containing external database credentials +* Fix `s3` binary store provider to properly use the `cache-fs` provider +* Allow arbitrary properties when using the `s3` binary store provider + +## [0.6.4] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 14, 2018 +* Make S3 endpoint configurable (was hardcoded with `s3.amazonaws.com`) + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [0.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [0.5.3] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [0.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [0.4.7] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [0.4.6] - Sep 25, 2018 +* Add PodDisruptionBudget for member nodes, defaulting to minAvailable of 1 + +## [0.4.4] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 + +## [0.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [0.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/Chart.yaml new file mode 100644 index 000000000..a1f6b3dd9 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + catalog.cattle.io/certified: 
partner + catalog.cattle.io/release-name: artifactory-ha +apiVersion: v1 +appVersion: 7.12.6 +description: Universal Repository Manager supporting all major packaging formats, build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: installers@jfrog.com + name: Chart Maintainers at JFrog +name: artifactory-ha +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 4.7.600 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/LICENSE b/charts/artifactory-ha/artifactory-ha/4.7.600/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/README.md new file mode 100644 index 000000000..47fe4db80 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/README.md @@ -0,0 +1,1267 @@ +# JFrog Artifactory High Availability Helm Chart + +**Heads up: Our Helm Chart docs are moving to our main documentation site. For Artifactory installers, see [Installing Artifactory](https://www.jfrog.com/confluence/display/JFROG/Installing+Artifactory).** + +## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory HA license + +## Chart Details +This chart will do the following: + +* Deploy an Artifactory highly available cluster with 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database. **NOTE:** For production grade installations it is recommended to use an external PostgreSQL +* Deploy an Nginx server + +## Artifactory HA architecture +The Artifactory HA cluster in this chart is made up of +- A single primary node +- Two member nodes, which can be resized at will + +Load balancing is done to the member nodes only. +This leaves the primary node free to handle jobs and tasks and not be interrupted by inbound traffic. +This can be controlled by the parameter `artifactory.service.pool`. +**NOTE:** + When using an Artifactory Pro license (which supports a single node only), set `artifactory.node.replicaCount=0` in values.yaml. + To scale from a single node to multiple nodes (>1), use an Enterprise(+) license and then do a helm upgrade (each node needs a separate license). + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client. + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +**NOTE:** Passing masterKey is mandatory for a fresh install of the chart (7.x app version) + +### Create a unique Master Key +Artifactory HA cluster requires a unique master key. + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** + +You should generate a unique one and pass it to the template at install/upgrade time. 
+```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} +``` + +### Install Chart +To install the chart with the release name `artifactory-ha`: + +```bash +helm upgrade --install artifactory-ha --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```bash +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available, and the nodes to complete initial setup. +Follow the instructions outputted by the install command to get the Artifactory IP and URL to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade --namespace artifactory-ha center/jfrog/artifactory-ha --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +Arifactory 6.x to 7.x upgrade requires a one time migration process. This is done automatically on pod startup if needed. +It's possible to configure the migration timeout with the following configuration in extreme cases. The provided default should be more than enough for completion of the migration. +```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 +``` +* Note: If you are upgrading from 1.x to 4.x and above chart versions, please delete the existing statefulset of postgresql before upgrading the chart due to breaking changes in postgresql subchart. +```bash +kubectl delete statefulsets -postgresql +``` + +### Artifactory memory and CPU resources +The Artifactory HA Helm chart comes with support for configured resource requests and limits to all pods. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. 
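+As an alternative to the long list of `--set` flags shown in the example below, the same settings can be kept in a values file. A minimal sketch, assuming a file named `resources-values.yaml` (the name is illustrative; the keys mirror the `artifactory.[primary|node]` parameters used in the example below):
+```yaml
+# resources-values.yaml - example only; adjust the numbers to your planned usage
+artifactory:
+  primary:
+    resources:
+      requests:
+        cpu: "500m"
+        memory: "1Gi"
+      limits:
+        cpu: "2"
+        memory: "4Gi"
+    javaOpts:
+      xms: "1g"
+      xmx: "4g"
+  node:
+    resources:
+      requests:
+        cpu: "500m"
+        memory: "1Gi"
+      limits:
+        cpu: "2"
+        memory: "4Gi"
+    javaOpts:
+      xms: "1g"
+      xmx: "4g"
+```
+```bash
+# Pass the values file on install/upgrade instead of individual --set flags
+helm upgrade --install artifactory-ha -f resources-values.yaml --namespace artifactory-ha center/jfrog/artifactory-ha
+```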
+ +See more information on [setting resources for your Artifactory based on planned usage](https://www.jfrog.com/confluence/display/RTF/System+Requirements#SystemRequirements-RecommendedHardware). + +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory-ha \ + --set artifactory.primary.resources.requests.cpu="500m" \ + --set artifactory.primary.resources.limits.cpu="2" \ + --set artifactory.primary.resources.requests.memory="1Gi" \ + --set artifactory.primary.resources.limits.memory="4Gi" \ + --set artifactory.primary.javaOpts.xms="1g" \ + --set artifactory.primary.javaOpts.xmx="4g" \ + --set artifactory.node.resources.requests.cpu="500m" \ + --set artifactory.node.resources.limits.cpu="2" \ + --set artifactory.node.resources.requests.memory="1Gi" \ + --set artifactory.node.resources.limits.memory="4Gi" \ + --set artifactory.node.javaOpts.xms="1g" \ + --set artifactory.node.javaOpts.xmx="4g" \ + --set initContainers.resources.requests.cpu="10m" \ + --set initContainers.resources.limits.cpu="250m" \ + --set initContainers.resources.requests.memory="64Mi" \ + --set initContainers.resources.limits.memory="128Mi" \ + --set postgresql.resources.requests.cpu="200m" \ + --set postgresql.resources.limits.cpu="1" \ + --set postgresql.resources.requests.memory="500Mi" \ + --set postgresql.resources.limits.memory="1Gi" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +> Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.[primary|node].javaOpts.xms` and `artifactory.[primary|node].javaOpts.xmx`. + +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + +### Artifactory storage +Artifactory HA support a wide range of storage back ends. You can see more details on [Artifactory HA storage options](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup#HAInstallationandSetup-SettingUpYourStorageConfiguration) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +> **IMPORTANT:** All storage configurations (except NFS) come with a default `artifactory.persistence.redundancy` parameter. 
+This is used to set how many replicas of a binary should be stored in the cluster's nodes. +Once this value is set on initial deployment, you cannot update it using helm. +It is recommended to set this to a number greater than half of your cluster's size, and never scale your cluster down to a size smaller than this number. + +#### Existing volume claim + +###### Primary node +In order to use an existing volume claim for the Artifactory primary storage, you need to: +- Create a persistent volume claim by the name `volume--artifactory-ha-primary-0` e.g. `volume-myrelease-artifactory-ha-primary-0` +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.primary.persistence.existingClaim=true +``` + +###### Member nodes +In order to use an existing volume claim for the Artifactory member nodes storage, you need to: +- Create persistent volume claims according to the number of replicas defined at `artifactory.node.replicaCount` by the names `volume--artifactory-ha-member-`, e.g. `volume-myrelease-artifactory-ha-member-0` and `volume-myrelease-artifactory-ha-member-1`. +- Pass a parameter to `helm install` and `helm upgrade` +```bash +... +--set artifactory.node.persistence.existingClaim=true +``` + +#### Existing shared volume claim + +In order to use an existing claim (for data and backup) that is to be shared across all nodes, you need to: + +- Create PVCs with ReadWriteMany that match the naming conventions: +``` + {{ template "artifactory-ha.fullname" . }}-data-pvc- + {{ template "artifactory-ha.fullname" . }}-backup-pvc- +``` +An example that shows 2 existing claims to be used: +``` + myexample-artifactory-ha-data-pvc-0 + myexample-artifactory-ha-backup-pvc-0 + myexample-artifactory-ha-data-pvc-1 + myexample-artifactory-ha-backup-pvc-1 +``` +- Set `artifactory.persistence.fileSystem.existingSharedClaim.enabled` in values.yaml to true: +``` +--set artifactory.persistence.fileSystem.existingSharedClaim.enabled=true +--set artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims=2 +``` + +#### NFS +To use an NFS server as your cluster's storage, you need to: +- Set up an NFS server. Get its IP as `NFS_IP` +- Create `data` and `backup` directories on the NFS exported directory with write permissions to all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore, see [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider) +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +In order to use a GCP service account, Artifactory needs a gcp.credentials.json file in the same directory as the binarystore.xml file. +This can be generated by running: +```bash +gcloud iam service-accounts keys create --iam-account +``` +This will produce the following, which can be saved to a file or copied into your `values.yaml`. 
+```json +{ + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." +} +``` + +One option is to create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` in a custom `values.yaml` +```bash +# Create the Kubernetes secret from the file you created earlier. +# IMPORTANT: The file must be called "gcp.credentials.json" because this is used later as the secret key! +kubectl create secret generic artifactory-gcp-creds --from-file=./gcp.credentials.json +``` +Set this secret in your custom `values.yaml` +```yaml +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + customSecretName: artifactory-gcp-creds +``` + +Another option is to put your generated config directly in your custom `values.yaml` and a secret will be created from it +```yaml +artifactory: + persistence: + googleStorage: + gcpServiceAccount: + enabled: true + config: | + { + "type": "service_account", + "project_id": "", + "private_key_id": "?????", + "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + "client_email": "???@j.iam.gserviceaccount.com", + "client_id": "???????", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." + } +``` + +#### AWS S3 +**NOTE:** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach the IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section. + +To use an AWS S3 bucket as the cluster's filestore, see [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider) +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +... +# With using existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match. 
See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, See [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine/s that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +To enable [Direct Cloud Storage Download](https://www.jfrog.com/confluence/display/JFROG/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-1.ConfiguretheArtifactoryFilestore) +```bash +... +--set artifactory.persistence.awsS3V3.enableSignedUrlRedirect=true \ +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory-ha --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Create a unique Master Key + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.masterKeySecretName=my-secret --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory-ha --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. 
The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory-ha --set artifactory.joinKeySecretName=my-secret --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged.. + +### Install Artifactory HA license +For activating Artifactory HA, you must install an appropriate license. There are three ways to manage the license. **Artifactory UI**, **REST API**, or a **Kubernetes Secret**. + +The easier and recommended way is the **Artifactory UI**. Using the **Kubernetes Secret** or **REST API** is for advanced users and is better suited for automation. + +**IMPORTANT:** You should use only one of the following methods. Switching between them while a cluster is running might disable your Artifactory HA cluster! + +##### Artifactory UI +Once primary cluster is running, open Artifactory UI and insert the license(s) in the UI. See [HA installation and setup](https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup) for more details. **Note that you should enter all licenses at once, with each license is separated by a newline.** If you add the licenses one at a time, you may get redirected to a node without a license and the UI won't load for that node. + +##### REST API +You can add licenses via REST API (https://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-InstallHAClusterLicenses). Note that the REST API expects "\n" for the newlines in the licenses. + +##### Kubernetes Secret +You can deploy the Artifactory license(s) as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license(s) written in it. If writing multiple licenses (must be in the same file), it's important to put **two new lines between each license block**! +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-cluster-license --from-file=./art.lic + +# Pass the license to helm +helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + + + + + + + +``` + +```bash +helm upgrade --install artifactory-ha -f values.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. 
+If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. the binarstore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup, +which means that changes you make over time to the `binaryStoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f values.yaml +``` + +2. Any custom configuration file you have to configure artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default `- {}` is applied which is to allow everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory primary and member pods. + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory-ha pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgresql + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory-ha +``` + +### Artifactory JMX Configuration +** You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +This will enable access to Artifactory with JMX on the default port (9010). 
+** You have the option to change the port by setting ```artifactory.primary.javaOpts.jmx.port``` and ```artifactory.node.javaOpts.jmx.port``` +to your choice of port + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow the following steps: +1. Enable JMX as described above and Change the Artifactory service to be of type LoadBalancer: +```bash +helm upgrade --install artifactory \ + --set artifactory.primary.javaOpts.jmx.enabled=true \ + --set artifactory.node.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + --namespace artifactory-ha center/jfrog/artifactory-ha +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with +```artifactory.primary.javaOpts.jmx.host``` and ```artifactory.node.javaOpts.jmx.host```), So in order to connect to Artifactory +with jconsole you should map the Artifactory kuberentes service IP to the service name using your hosts file as such: +``` + artifactory-ha--primary + +``` +3. Launch jconsole with the service address and port: +```bash +jconsole artifactory-ha--primary: +jconsole : +``` + +### Bootstrapping Artifactory admin password +You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide. + +1. Create `admin-creds-values.yaml` and provide the IP (By default 127.0.0.1) and password: +```yaml +artifactory: + admin: + ip: "" # Example: "*" to allow access from anywhere + username: "admin" + password: "" +``` + +2. Apply the `admin-creds-values.yaml` file: +```bash +helm upgrade --install artifactory --namespace artifactory-ha center/jfrog/artifactory-ha -f admin-creds-values.yaml +``` + +### Bootstrapping Artifactory configuration +**IMPORTANT:** Bootstrapping Artifactory needs license. Pass license as shown in above section. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. Pass the configMap to helm +```bash +helm upgrade --install artifactory-ha --set artifactory.license.secret=artifactory-cluster-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Use custom nginx.conf with Nginx + +Steps to create configMap with nginx.conf +* Create `nginx.conf` file. 
+```bash +kubectl create configmap nginx-config --from-file=nginx.conf +``` +* Pass the configMap to helm install +```bash +helm upgrade --install artifactory-ha --set nginx.customConfigMap=nginx-config --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Scaling your Artifactory cluster +A key feature in Artifactory HA is the ability to set an initial cluster size with `--set artifactory.node.replicaCount=${CLUSTER_SIZE}` and, if needed, resize it. + +##### Before scaling +**IMPORTANT:** When scaling, you need to explicitly pass the database password if it's an auto-generated one (this is the default with the enclosed PostgreSQL helm chart). + +Get the current database password: +```bash +export DB_PASSWORD=$(kubectl get $(kubectl get secret -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +Use `--set postgresql.postgresqlPassword=${DB_PASSWORD}` with every scale action to prevent a misconfigured cluster! + +##### Scale up +Let's assume you have a cluster with **2** member nodes, and you want to scale up to **3** member nodes (a total of 4 nodes). +```bash +# Scale to 4 nodes (1 primary and 3 member nodes) +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=3 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +##### Scale down +Let's assume you have a cluster with **3** member nodes, and you want to scale down to **2** member nodes. + +```bash +# Scale down to 2 member nodes +helm upgrade --install artifactory-ha --set artifactory.node.replicaCount=2 --set postgresql.postgresqlPassword=${DB_PASSWORD} --namespace artifactory-ha center/jfrog/artifactory-ha +``` +- **NOTE:** Since Artifactory is running as a Kubernetes Stateful Set, the removal of the node will **not** remove the persistent volume. You need to explicitly remove it +```bash +# List PVCs +kubectl get pvc + +# Remove the PVC with the highest ordinal! +# In this example, the highest node ordinal was 2, so we need to remove its storage. +kubectl delete pvc volume-artifactory-node-2 +``` + +### Use an external Database + +**For production grade installations it is recommended to use an external PostgreSQL with a static password** + +#### PostgreSQL +There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the database name. + +This can be done with the following parameters: +```bash +... +--set postgresql.enabled=false \ +--set database.type=postgresql \ +--set database.driver=org.postgresql.Driver \ +--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! + +#### Other DB type +There are cases where you will want to use a different database and not the enclosed **PostgreSQL**. +See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database) +> The official Artifactory Docker images include the PostgreSQL database driver. 
+> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib + +This can be done with the following parameters +```bash +# Make sure your Artifactory Docker image has the MySQL database driver in it +... +--set postgresql.enabled=false \ +--set artifactory.preStartCommand="mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! +##### Configuring Artifactory with external Oracle database +To use artifactory with oracledb the required instant client library files, libaio has to be copied to tomcat lib. Also set LD_LIBRARY_PATH env variable. +1. Create a value file with the configuration +```yaml +postgresql: + enabled: false +database: + type: oracle + driver: oracle.jdbc.OracleDriver + url: + user: + password: +artifactory: + preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O instantclient-basic-linux.x64-19.6.0.0.0dbru.zip https://download.oracle.com/otn_software/linux/instantclient/19600/instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && unzip -jn instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && wget -O libaio1_0.3.110-3_amd64.deb http://ftp.br.debian.org/debian/pool/main/liba/libaio/libaio1_0.3.110-3_amd64.deb && dpkg-deb -x libaio1_0.3.110-3_amd64.deb . && cp lib/x86_64-linux-gnu/* ." + extraEnvironmentVariables: + - name: LD_LIBRARY_PATH + value: /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib +``` +2. Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory-ha --namespace artifactory -f values-oracle.yaml +``` +**NOTE:** If its an upgrade from 6.x to 7.x, add same `preStartCommand` under `artifactory.migration.preStartCommand` + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +... +``` + +### Deleting Artifactory +To delete the Artifactory HA cluster + +On helm v2: +```bash +helm delete --purge artifactory-ha +``` + +On helm v3: +```bash +helm delete artifactory-ha --namespace artifactory-ha +``` + +This will completely delete your Artifactory HA cluster. +**NOTE:** Since Artifactory is running as Kubernetes Stateful Sets, the removal of the helm release will **not** remove the persistent volumes. 
You need to explicitly remove them +```bash +kubectl delete pvc -l release=artifactory-ha +``` +See more details in the official [Kubernetes Stateful Set removal page](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/) + +### Custom Docker registry for your images +If you need to pull your Docker images from a private registry (for example, when you have a custom image with a MySQL database driver), you need to create a +[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm +```bash +# Create a Docker registry secret called 'regsecret' +kubectl create secret docker-registry regsecret --docker-server=${DOCKER_REGISTRY} --docker-username=${DOCKER_USER} --docker-password=${DOCKER_PASS} --docker-email=${DOCKER_EMAIL} +``` +Once created, you pass it to `helm` +```bash +helm upgrade --install artifactory-ha --set imagePullSecrets=regsecret --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +### Logger sidecars +This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml) + +Get list of containers in the pod +```bash +kubectl get pods -n -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n' +``` + +View specific log +```bash +kubectl logs -n -c +``` + + +### Custom init containers +There are cases where a special, unsupported init processes is needed like checking something on the file system or testing something before spinning up the main container. + +For this, there is a section for writing custom init containers before and after the predefined init containers in the [values.yaml](values.yaml) . By default it's commented out +```yaml +artifactory: + ## Add custom init containers executed before predefined init containers + customInitContainersBegin: | + ## Init containers template goes here ## + ## Add custom init containers executed after predefined init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed. For example monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Custom secrets +If you need to add a custom secret in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom secrets in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + # Add custom secrets - secret per file + customSecrets: + - name: custom-secret + key: custom-secret.yaml + data: > + secret data +``` + +To use a custom secret, need to define a custom volume. 
+```yaml +artifactory: + ## Add custom volumes + customVolumes: | + - name: custom-secret + secret: + secretName: custom-secret +``` + +To use a volume, need to define a volume mount as part of a custom init or sidecar container. +```yaml +artifactory: + customSidecarContainers: + - name: side-car-container + volumeMounts: + - name: custom-secret + mountPath: /opt/custom-secret.yaml + subPath: custom-secret.yaml + readOnly: true +``` + +### Add Artifactory User Plugin during installation +If you need to add [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. + +Create a secret with [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) by following command: +```bash +# Secret with single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory-ha + +# Secret with single user plugin with configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory-ha +``` + +Add plugin secret names to `plugins.yaml` as following: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory-ha -f plugins.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory-ha: # Name of the artifactory-ha dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +For additional information, please refer [here](https://www.jfrog.com/confluence/display/JFROG/User+Plugins). + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option. 
+ +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f configmaps.yaml --namespace artifactory-ha center/jfrog/artifactory-ha +``` + +This will, in turn: +* create a configMap with the files you specified above +* create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + +### Establishing TLS and Adding certificates +In HTTPS, the communication protocol is encrypted using Transport Layer Security (TLS). By default, TLS between JFrog Platform nodes is disabled. +When TLS is enabled, JFrog Access acts as the Certificate Authority (CA) signs the TLS certificates used by all the different JFrog Platform nodes. + +To establish TLS between JFrog Platform nodes: +Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. For more info, Please refer [here](https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates) + +To enable tls in charts, set `tls` to true under `access` in [values.yaml](values.yaml). By default it's false +```yaml +access: + accessConfig: + security: + tls: true +``` + +To add custom tls certificates, create a tls secret from the certificate files. + +```bash +kubectl create secret tls --cert=ca.crt --key=ca.private.key +``` + +For resetting access certificates , you can set `resetAccessCAKeys` to true under access section in [values.yaml](values.yaml) and perform an helm upgrade. +* Note : Once helm upgrade is done, set `resetAccessCAKeys` to false for subsequent upgrades (to avoid resetting access certificates on every helm upgrade) +```yaml +access: + accessConfig: + security: + tls: true + customCertificatesSecretName: + resetAccessCAKeys: true +``` + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. + +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory-ha -f filebeat.yaml --namespace artifactory-ha center/jfrog/artifactory +``` + +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml` + +### Install Artifactory HA with Nginx and Terminate SSL in Nginx Service(LoadBalancer). 
+To install the helm chart and perform SSL offload in the LoadBalancer layer of Nginx, for example using AWS ACM certificates to do SSL offload in the load balancer layer, add the following to an `artifactory-ssl-values.yaml` file: +```yaml + nginx: + https: + enabled: false + service: + ssloffload: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ssl-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add the below lines to an `artifactory-ingress-values.yaml` file: +```yaml + ingress: + enabled: true + hosts: + - artifactory.company.com + artifactory: + service: + type: NodePort + nginx: + enabled: false +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ingress-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. + +```yaml +ingress: + enabled: true + defaultBackend: + enabled: false + hosts: + - myhost.example.com + annotations: + ingress.kubernetes.io/force-ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + ingress.kubernetes.io/proxy-read-timeout: "600" + ingress.kubernetes.io/proxy-send-timeout: "600" + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token; + rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3; + nginx.ingress.kubernetes.io/proxy-body-size: "0" + tls: + - hosts: + - "myhost.example.com" +``` + +### Ingress additional rules + +You have the option to add additional ingress rules to the Artifactory ingress. An example for this use case can be routing the /xray path to Xray. 
+In order to do that, simply add the following to an `artifactory-ha-values.yaml` file: +```yaml +ingress: + enabled: true + + defaultBackend: + enabled: false + + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite "(?i)/xray(/|$)(.*)" /$2 break; + + additionalRules: | + - host: + http: + paths: + - path: / + backend: + serviceName: + servicePort: + - path: /xray + backend: + serviceName: + servicePort: + - path: /artifactory + backend: + serviceName: {{ template "artifactory.nginx.fullname" . }} + servicePort: {{ .Values.nginx.externalPortHttp }} +``` + +and running: +```bash +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f artifactory-ha-values.yaml +``` + +### Dedicated Ingress object for replicator service + +You have the option to add an additional ingress object for the Replicator service. An example for this use case can be routing the /replicator/ path to Artifactory. +In order to do that, simply add the following to an `artifactory-values.yaml` file: + +```yaml +artifactory: + replicator: + enabled: true + ingress: + name: + hosts: + - myhost.example.com + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-buffering: "off" + nginx.ingress.kubernetes.io/configuration-snippet: | + chunked_transfer_encoding on; + tls: + - hosts: + - "myhost.example.com" + secretName: +``` + +### Ingress behind another load balancer +If you are running a load balancer that is used to offload TLS in front of the Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable the **'use-forwarded-headers=true'** option. Otherwise, nginx will fill those headers with the request information it receives from the external load balancer. + +To enable it with `helm install`: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx --set-string controller.config.use-forwarded-headers=true +``` +or with `helm upgrade`: +```bash +helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true center/kubernetes-ingress-nginx/ingress-nginx +``` +or create a values.yaml file with the following content: +```yaml +controller: + config: + use-forwarded-headers: "true" +``` +Then install nginx-ingress with the values file you created: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx -f values.yaml +``` + +### Log Analytics + +#### FluentD, Prometheus and Grafana + +To configure Prometheus and Grafana to gather metrics from Artifactory through the use of FluentD, please refer to the log analytics repo: + +https://github.com/jfrog/log-analytics-prometheus + +That repo contains a file `artifactory-ha-values.yaml` that can be used to deploy Prometheus, Service Monitor, and Grafana with this chart, as sketched below.
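+ +For reference, a minimal sketch of wiring that values file into this chart (the location of the file inside the repo is an assumption here; adjust the path to match the repo layout): +```bash +# Clone the log analytics examples referenced above +git clone https://github.com/jfrog/log-analytics-prometheus.git + +# Install/upgrade Artifactory HA with the example values file from that repo +# (adjust the file path if it differs in the repo) +helm upgrade --install artifactory-ha --namespace artifactory-ha center/jfrog/artifactory-ha -f log-analytics-prometheus/artifactory-ha-values.yaml +```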
+ + +## Useful links +- https://www.jfrog.com/confluence/display/EP/Getting+Started +- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory +- https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ReverseProxyConfiguration.md b/charts/artifactory-ha/artifactory-ha/4.7.600/ReverseProxyConfiguration.md new file mode 100644 index 000000000..851593236 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ReverseProxyConfiguration.md @@ -0,0 +1,140 @@ +# JFrog Artifactory Reverse Proxy Settings using Nginx + +#### Reverse Proxy +* To use Artifactory as docker registry it's mandatory to use Reverse Proxy. +* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate +the required configuration snippet which you can then download and install directly in the corresponding directory of your reverse proxy server. +* To learn about configuring NGINX or Apache for reverse proxy refer to documentation provided on [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy) +* By default Artifactory helm chart uses Nginx for reverse proxy and load balancing. + +**Note**: Nginx image distributed with Artifactory helm chart is custom image managed and maintained by JFrog. + +#### Features of Artifactory Nginx +* Provides default configuration with self signed SSL certificate generated on each helm install/upgrade. +* Persist configuration and SSL certificate in `/var/opt/jfrog/nginx` directory + +#### Changing the default Artifactory nginx conf +Use a values.yaml file for changing the value of nginx.mainConf or nginx.artifactoryConf +These configuration will be mounted to the nginx container using a configmap. +For example: +1. Create a values file `nginx-values.yaml` with the following values: +```yaml +nginx: + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen {{ .Values.nginx.internalPortHttps }} ssl; + listen {{ .Values.nginx.internalPortHttp }} ; + ## Change to you DNS name you use to access Artifactory + server_name ~(?.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.externalPort }}/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } +``` + +2. Install/upgrade artifactory: +```bash +helm upgrade --install artifactory-ha jfrog/artifactory-ha -f nginx-values.yaml +``` + + +#### Steps to use static configuration for reverse proxy in nginx. +1. Get Artifactory service name using this command `kubectl get svc -n $NAMESPACE` + +2. Create `artifactory.conf` file with nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d) + + Following is example `artifactory.conf` + + **Note**: + * Create file with name `artifactory.conf` as it's fixed in configMap key. + * Replace `artifactory-artifactory` with service name taken from step 1. + + ```bash + ## add ssl entries when https has been set in config + ssl_certificate /var/opt/jfrog/nginx/ssl/tls.crt; + ssl_certificate_key /var/opt/jfrog/nginx/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen 443 ssl; + listen 80; + ## Change to you DNS name you use to access Artifactory + server_name ~(?.+)\.artifactory-artifactory artifactory-artifactory; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://artifactory-artifactory:8081/artifactory/$1 break; + } + proxy_pass http://artifactory-artifactory:8081/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + ``` + +3. Create configMap of `artifactory.conf` created with step above. + ```bash + kubectl create configmap art-nginx-conf --from-file=artifactory.conf + ``` +4. Deploy Artifactory using helm chart. 
+ You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml) + + The following is the command to set the value at runtime: + ```bash + helm install --name artifactory-ha --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory-ha + ``` \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/UPGRADE_NOTES.md b/charts/artifactory-ha/artifactory-ha/4.7.600/UPGRADE_NOTES.md new file mode 100644 index 000000000..b3326dccc --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/UPGRADE_NOTES.md @@ -0,0 +1,42 @@ +# JFrog Artifactory Chart Upgrade Notes +This file describes special upgrade notes needed at specific versions. + +## Upgrade from 1.X to 2.X and above (Chart Versions) + +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* To upgrade from a version prior to 1.x, you first need to upgrade to the latest version of 1.x as described in https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md. +* Note: If you are upgrading from 1.x to 4.x and above chart versions, please delete the existing statefulset of postgresql before upgrading the chart due to breaking changes in the postgresql subchart (replace `<release-name>` with your Helm release name): +```bash +kubectl delete statefulsets <release-name>-postgresql +``` + +## Upgrade from 0.X to 1.X (Chart Versions) +**DOWNTIME IS REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* The PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See the [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shut it down) + a. Scale down the cluster to the primary node only (`node.replicaCount=0`) so the exported db and configuration will be kept on one known node (the primary) + b. If your Artifactory HA K8s service is set to member nodes only (`service.pool=members`) you will need to access the primary node directly (use `kubectl port-forward`) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as the Artifactory filestore will persist across the upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. The old PostgreSQL will be removed and a new one deployed + a. You must pass an explicit "ready for upgrade" flag with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4.
Once ready, open Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry you can't see the old config and files. It will all restore with the system import in the next step + 5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6. Restore access to Artifactory + a. Scale the cluster member nodes back to the original size + * Artifactory should now be ready to get back to normal operation diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/app-readme.md b/charts/artifactory-ha/artifactory-ha/4.7.600/app-readme.md new file mode 100644 index 000000000..a5aa5fd47 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/app-readme.md @@ -0,0 +1,16 @@ +# JFrog Artifactory High Availability Helm Chart + +Universal Repository Manager supporting all major packaging formats, build tools and CI servers. + +## Chart Details +This chart will do the following: + +* Deploy Artifactory highly available cluster. 1 primary node and 2 member nodes. +* Deploy a PostgreSQL database +* Deploy an Nginx server(optional) + +## Useful links +Blog: [Herd Trust Into Your Rancher Labs Multi-Cloud Strategy with Artifactory](https://jfrog.com/blog/herd-trust-into-your-rancher-labs-multi-cloud-strategy-with-artifactory/) + +## Activate Your Artifactory Instance +Don't have a license? Please send an email to [rancher-jfrog-licenses@jfrog.com](mailto:rancher-jfrog-licenses@jfrog.com) to get it. diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/.helmignore b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/Chart.yaml new file mode 100644 index 000000000..2f858e60e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.3.4 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/README.md new file mode 100644 index 000000000..319291bc6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/README.md @@ -0,0 +1,680 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. 
| `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node 
labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. |`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
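+ +As a minimal sketch of that recommendation (the tag below is only illustrative; pick a current immutable tag from the [Bitnami PostgreSQL tags](https://hub.docker.com/r/bitnami/postgresql/tags/)): +```console +$ helm install my-release bitnami/postgresql --set image.tag=11.9.0-debian-10-r1 +```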
+ +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. 
Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. 
See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. 
The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 9.0.0 + +In this version the chart was adapted to follow the Helm label best practices, see [PR 3021](https://github.com/bitnami/charts/pull/3021). That means the backward compatibility is not guarantee when upgrading the chart to this major version. + +As a workaround, you can delete the existing statefulset (using the `--cascade=false` flag pods are not deleted) before upgrade the chart. 
For example, this can be a valid workflow: + +- Deploy an old version (8.X.X) +```console +$ helm install postgresql bitnami/postgresql --version 8.10.14 +``` + +- Old version is up and running +```console +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 1 2020-08-04 13:39:54.783480286 +0000 UTC deployed postgresql-8.10.14 11.8.0 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 76s +``` + +- The upgrade to the latest one (9.X.X) is going to fail +```console +$ helm upgrade postgresql bitnami/postgresql +Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden +``` + +- Delete the statefulset +```console +$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql +statefulset.apps "postgresql-postgresql" deleted +``` + +- Now the upgrade works +```cosnole +$ helm upgrade postgresql bitnami/postgresql +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 3 2020-08-04 13:42:08.020385884 +0000 UTC deployed postgresql-9.1.2 11.8.0 +``` + +- We can kill the existing pod and the new statefulset is going to create a new one: +```console +$ kubectl delete pod postgresql-postgresql-0 +pod "postgresql-postgresql-0" deleted + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 19s +``` + +Please, note that without the `--cascade=false` both objects (statefulset and pod) are going to be removed and both objects will be deployed again with the `helm upgrade` command + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). 
+ +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinty` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running: + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`; for that, connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you should be prompted for a password; this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local PostgreSQL, so the password should be the new one (you can find it in the NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below: + +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/.helmignore b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/Chart.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 000000000..e6bac7873 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 0.6.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself.
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.6.2 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/README.md new file mode 100644 index 000000000..e04391a3f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/README.md @@ -0,0 +1,274 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following tables list the helpers available in the library, scoped into different sections. + +**Names** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +**TplValues** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value to be rendered as a template, and context is frequently the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress.
| `.` Chart context | + +**Validations** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $`; secret and field are optional. If they are given, the helper will generate instructions on how to retrieve the current value. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | When a chart uses `bitnami/mariadb` as a subchart, use this to validate that the required passwords are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "context" $` | +| `common.validations.values.postgresql.passwords` | This helper will ensure that the required passwords are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether postgresql is used as a subchart. | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. | + +**Errors** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.errors.upgrade.passwords.empty` | Ensures that the required passwords are given when upgrading a chart. If `validationErrors` is not empty, it throws an error and stops the upgrade action.
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +**Utils** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | + +**Secrets** + +| Helper identifier | Description | Expected Input | +|-------------------|-------------|----------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate the secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: Image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information in the logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether to enable persistence. + example: true + +storageClass: + type: string + description: Persistent Volume Storage Class. If set to "-", storageClassName: "" is used, which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret.
+ example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +**NOTES.txt** + +``` +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c0ea2c70c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress.
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_errors.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..d6d3ec65a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_images.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 000000000..aafde9f3b --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_labels.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_names.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 000000000..adf2a74f4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..d6165a294 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. 
The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_storage.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_utils.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..7d02f2ef6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_validations.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100644 index 000000000..62635b30e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $valueKeyArray := splitList "." .valueKey -}} + {{- $value := "" -}} + {{- $latestObj := $.context.Values -}} + {{- range $valueKeyArray -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.valueKey | fail -}} + {{- end -}} + + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} + {{- end -}} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . 
-}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a mariadb required password must not be empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "context" $) }} + +Validate value params: + - secret - String - Required. Name of the secret where mysql values are stored, e.g: "mysql-passwords-secret" +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- if and (not .context.Values.mariadb.existingSecret) .context.Values.mariadb.enabled -}} + {{- $requiredPasswords := list -}} + + {{- if .context.Values.mariadb.secret.requirePasswords -}} + {{- $requiredRootMariadbPassword := dict "valueKey" "mariadb.rootUser.password" "secret" .secretName "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootMariadbPassword -}} + + {{- if not (empty .context.Values.mariadb.db.user) -}} + {{- $requiredMariadbPassword := dict "valueKey" "mariadb.db.password" "secret" .secretName "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredMariadbPassword -}} + {{- end -}} + + {{- if .context.Values.mariadb.replication.enabled -}} + {{- $requiredReplicationPassword := dict "valueKey" "mariadb.replication.password" "secret" .secretName "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a postgresql required password must not be empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . 
-}} + + {{- if and (not $existingSecret) $enabled -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if $enabledReplication -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.enabled | quote -}} + {{- else -}} + true + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.replication.enabled | quote -}} + {{- else -}} + {{- .context.Values.replication.enabled | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. 
+ +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/values.yaml new file mode 100644 index 000000000..9ecdc93f5 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/commonAnnotations.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 000000000..f6977823c --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@ +commonAnnotations: + helm.sh/hook: 'pre-install, pre-upgrade' + helm.sh/hook-weight: '-1' diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/shmvolume-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 000000000..347d3b40a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/README.md new file mode 100644 index 000000000..1813a2fea --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map. diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/conf.d/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/conf.d/README.md new file mode 100644 index 000000000..184c1875d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only want to specify certain parameters, you can copy your extended `.conf` files here. +These files will be injected as config maps and will add to/overwrite the default configuration using the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 000000000..cba38091e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy your custom `.sh`, `.sql` or `.sql.gz` files here so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.lock b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.lock new file mode 100644 index 000000000..72e1642e2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.6.2 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-09-01T17:40:02.795096189Z" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.yaml new file mode 100644 index 000000000..2c28bfe14 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/NOTES.txt b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/NOTES.txt new file mode 100644 index 000000000..596e969ce --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. 
+{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 000000000..68cd0dc0e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,501 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. +*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. 
+*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/configmap.yaml new file mode 100644 index 000000000..b29ef6040 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/extended-config-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 000000000..f21a97654 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/initialization-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 000000000..6637867a3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 000000000..6b7a3171e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-svc.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 000000000..b993c9971 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 000000000..2a7b372fe --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/podsecuritypolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..da0b3ab11 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/prometheusrule.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 000000000..b0c41b1a4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/role.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/role.yaml new file mode 100644 index 000000000..6d3cf50a4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . 
}} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/rolebinding.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 000000000..f7837388d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/secrets.yaml new file mode 100644 index 000000000..c93dbe0bd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 000000000..17f7ff399 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . 
}} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/servicemonitor.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 000000000..d57b7fb48 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset-slaves.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 000000000..54d24099f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,345 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 000000000..0e6eefebb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,514 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . 
}}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . 
}} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-headless.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 000000000..49131578a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-read.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 000000000..885c7bb04 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: slave +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc.yaml new file mode 100644 index 000000000..e9fc50456 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values-production.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values-production.yaml new file mode 100644 index 000000000..c08014549 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values-production.yaml @@ -0,0 +1,594 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
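In svc.yaml and svc-read.yaml above, every service field is resolved with coalesce, so the per-role blocks (master.service, slave.service) only need to carry the fields they override and everything else falls back to the chart-wide service block. A minimal sketch of such an override, assuming the read replicas should be exposed on a NodePort while the primary keeps the defaults (all values are illustrative):

service:
  type: ClusterIP
  port: 5432
slave:
  service:
    type: NodePort      # coalesce picks this over .Values.service.type for the -read Service
    nodePort: 30432     # illustrative port; emitted because a nodePort is set

The rendered "-read" Service keeps the chart-wide port and targets only pods labelled role: slave.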
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'on' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
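values-production.yaml above enables replication with two read replicas and one synchronous standby; numSynchronousReplicas may not exceed slaveReplicas, and synchronousCommit accepts the modes listed in its comment (on, off, remote_apply, remote_write, local). A hedged sketch that tightens this to three replicas with two synchronous standbys:

replication:
  enabled: true
  user: repl_user
  password: repl_password        # illustrative; prefer an existing secret in real deployments
  slaveReplicas: 3
  synchronousCommit: "remote_apply"
  numSynchronousReplicas: 2      # must be <= slaveReplicas
  applicationName: my_application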
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + 
## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
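Because persistence.existingClaim is evaluated as a template (the statefulset renders it with tpl . $), the claim name may reference .Release or .Chart, and when it is set the chart mounts that PVC instead of emitting a volumeClaimTemplate. A sketch, assuming a PVC created ahead of time and named after the release (the name is hypothetical):

persistence:
  enabled: true
  existingClaim: "{{ .Release.Name }}-pgdata"   # rendered via tpl; this PVC must already exist
  mountPath: /bitnami/postgresql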
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
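When tls.enabled is true, the statefulset refuses to render without tls.certificatesSecret and copies the named files from that Secret into an emptyDir before PostgreSQL starts. A minimal sketch, assuming a pre-created Secret called postgresql-tls whose keys are tls.crt, tls.key and ca.crt (both the Secret name and the key names are assumptions):

tls:
  enabled: true
  preferServerCiphers: true
  certificatesSecret: postgresql-tls   # hypothetical; must exist in the release namespace
  certFilename: tls.crt
  certKeyFilename: tls.key
  certCAFilename: ca.crt               # optional; requests client certificates when set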
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.schema.json b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.schema.json new file mode 100644 index 000000000..7b5e2efc3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable 
Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.yaml new file mode 100644 index 000000000..f45c4183d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/charts/postgresql/values.yaml @@ -0,0 +1,600 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
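The commented metrics.customMetrics block above is what drives the exporter's --extend.query-path /conf/custom-metrics.yaml argument and the custom-metrics ConfigMap mounted into the metrics container of the statefulset. A sketch that simply uncomments the documented example query and adds the exporter environment variable shown in the comments:

metrics:
  enabled: true
  customMetrics:
    pg_database:
      query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
      metrics:
        - name:
            usage: "LABEL"
            description: "Name of the database"
        - size_bytes:
            usage: "GAUGE"
            description: "Size of the database in bytes"
  extraEnvVars:
    - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS   # example variable from the chart comments
      value: "true"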
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
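As the init-container comments above explain, on platforms that assign pod UIDs dynamically (for example OpenShift SCCs) the data-directory chown can be delegated to an autodetected user by setting volumePermissions.securityContext.runAsUser to "auto", usually together with disabling the pod security context and the shm chmod init step. A sketch of that combination, following the chart's own guidance (other values left at defaults):

volumePermissions:
  enabled: true
  securityContext:
    runAsUser: "auto"     # chown the data dir to the UID/GID detected at runtime
securityContext:
  enabled: false          # let the platform inject the pod-level security context
shmVolume:
  chmod:
    enabled: false        # skip the chmod 777 /dev/shm init step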
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + extraInitContainers: | + # - name: do-something + # image: busybox + # 
command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
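With networkPolicy.enabled=true and allowExternal=false, only pods carrying the expected client label can reach the PostgreSQL port, and explicitNamespacesSelector widens that to whole namespaces. A hedged sketch that also admits namespaces labelled role=frontend (the label is taken from the example in the comments above):

networkPolicy:
  enabled: true
  allowExternal: false        # restrict ingress to labelled client pods
  explicitNamespacesSelector:
    matchLabels:
      role: frontend          # namespaces with this label may also connect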
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ci/access-tls-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/access-tls-values.yaml new file mode 100644 index 000000000..e11939170 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/access-tls-values.yaml @@ -0,0 +1,13 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + image: + tag: 12.3.0-debian-10-r71 + postgresqlPassword: password +access: + accessConfig: + security: + tls: true + resetAccessCAKeys: true diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ci/default-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/default-values.yaml new file mode 100644 index 000000000..1ebf93823 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/default-values.yaml @@ -0,0 +1,9 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
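To have Prometheus Operator scrape the exporter and alert on replication lag, the serviceMonitor and prometheusRule toggles shown above can be enabled together; the rule below is adapted from the commented example and, as the comments note, should stay constrained to this release's metrics Service (the release label is illustrative):

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    additionalLabels:
      release: prometheus            # illustrative; match your Prometheus Operator selector
  prometheusRule:
    enabled: true
    namespace: monitoring
    rules:
      - alert: HugeReplicationLag
        expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
        for: 1m
        labels:
          severity: critical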
+databaseUpgradeReady: true +## This is an exception here because HA needs masterKey to connect with other node members and it is commented in values to support 6.x to 7.x Migration +## Please refer https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#special-upgrade-notes-1 +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ci/global-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/global-values.yaml new file mode 100644 index 000000000..d2ce1d675 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/global-values.yaml @@ -0,0 +1,47 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +global: + versions: + artifactory: 7.11.2 + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + customInitContainers: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: volume + # Add custom volumes + customVolumes: | + - name: custom-script + emptyDir: + sizeLimit: 100Mi + # Add custom volumesMounts + customVolumeMounts: | + - name: custom-script + mountPath: "/scripts" + # Add custom sidecar containers + customSidecarContainers: | + - name: "sidecar-list-etc" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + securityContext: + allowPrivilegeEscalation: false + command: ["sh","-c","echo 'Sidecar is running' >> /scripts/sidecar.txt; cat /scripts/sidecar.txt; while true; do sleep 30; done"] + volumeMounts: + - mountPath: "/scripts" + name: custom-script + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ci/migration-disabled-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/migration-disabled-values.yaml new file mode 100644 index 000000000..6c1e2587f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/migration-disabled-values.yaml @@ -0,0 +1,8 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + migration: + enabled: false diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/ci/test-values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/test-values.yaml new file mode 100644 index 000000000..0ee7cbdd2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/ci/test-values.yaml @@ -0,0 +1,14 @@ +databaseUpgradeReady: true +artifactory: + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + persistence: + enabled: true + +postgresql: + image: + tag: 9.6.18-debian-10-r7 + postgresqlPassword: "password" + postgresqlExtendedConf: + maxConnections: "102" + 
persistence: + enabled: true diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/files/migrate.sh b/charts/artifactory-ha/artifactory-ha/4.7.600/files/migrate.sh new file mode 100644 index 000000000..e35bfdbb2 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/files/migrate.sh @@ -0,0 +1,4339 @@ +#!/bin/bash + +# Flags +FLAG_Y="y" +FLAG_N="n" +FLAGS_Y_N="$FLAG_Y $FLAG_N" +FLAG_NOT_APPLICABLE="_NA_" + +CURRENT_VERSION=$1 + +WRAPPER_SCRIPT_TYPE_RPMDEB="RPMDEB" +WRAPPER_SCRIPT_TYPE_DOCKER_COMPOSE="DOCKERCOMPOSE" + +SENSITIVE_KEY_VALUE="__sensitive_key_hidden___" + +# Shared system keys +SYS_KEY_SHARED_JFROGURL="shared.jfrogUrl" +SYS_KEY_SHARED_SECURITY_JOINKEY="shared.security.joinKey" +SYS_KEY_SHARED_SECURITY_MASTERKEY="shared.security.masterKey" + +SYS_KEY_SHARED_NODE_ID="shared.node.id" +SYS_KEY_SHARED_JAVAHOME="shared.javaHome" + +SYS_KEY_SHARED_DATABASE_TYPE="shared.database.type" +SYS_KEY_SHARED_DATABASE_TYPE_VALUE_POSTGRES="postgresql" +SYS_KEY_SHARED_DATABASE_DRIVER="shared.database.driver" +SYS_KEY_SHARED_DATABASE_URL="shared.database.url" +SYS_KEY_SHARED_DATABASE_USERNAME="shared.database.username" +SYS_KEY_SHARED_DATABASE_PASSWORD="shared.database.password" + +SYS_KEY_SHARED_ELASTICSEARCH_URL="shared.elasticsearch.url" +SYS_KEY_SHARED_ELASTICSEARCH_USERNAME="shared.elasticsearch.username" +SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD="shared.elasticsearch.password" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP="shared.elasticsearch.clusterSetup" +SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE="shared.elasticsearch.unicastFile" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP_VALUE="YES" + +# Define this in product specific script. Should contain the path to unitcast file +# File used by insight server to write cluster active nodes info. This will be read by elasticsearch +#SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE_VALUE="" + +SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME="shared.rabbitMq.active.node.name" +SYS_KEY_RABBITMQ_ACTIVE_NODE_IP="shared.rabbitMq.active.node.ip" + +# Filenames +FILE_NAME_SYSTEM_YAML="system.yaml" +FILE_NAME_JOIN_KEY="join.key" +FILE_NAME_MASTER_KEY="master.key" +FILE_NAME_INSTALLER_YAML="installer.yaml" + +# Global constants used in business logic +NODE_TYPE_STANDALONE="standalone" +NODE_TYPE_CLUSTER_NODE="node" +NODE_TYPE_DATABASE="database" + +# External(isable) databases +DATABASE_POSTGRES="POSTGRES" +DATABASE_ELASTICSEARCH="ELASTICSEARCH" +DATABASE_RABBITMQ="RABBITMQ" + +POSTGRES_LABEL="PostgreSQL" +ELASTICSEARCH_LABEL="Elasticsearch" +RABBITMQ_LABEL="Rabbitmq" + +ARTIFACTORY_LABEL="Artifactory" +JFMC_LABEL="Mission Control" +DISTRIBUTION_LABEL="Distribution" +XRAY_LABEL="Xray" + +POSTGRES_CONTAINER="postgres" +ELASTICSEARCH_CONTAINER="elasticsearch" +RABBITMQ_CONTAINER="rabbitmq" +REDIS_CONTAINER="redis" + +#Adding a small timeout before a read ensures it is positioned correctly in the screen +read_timeout=0.5 + +# Options related to data directory location +PROMPT_DATA_DIR_LOCATION="Installation Directory" +KEY_DATA_DIR_LOCATION="installer.data_dir" + +SYS_KEY_SHARED_NODE_HAENABLED="shared.node.haEnabled" +PROMPT_ADD_TO_CLUSTER="Are you adding an additional node to an existing product cluster?" 
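The SYS_KEY_SHARED_* constants above are dotted paths into the product's system.yaml, which is where migrate.sh ultimately writes the collected answers. A hedged sketch of the section those keys describe (every value is a placeholder):

shared:
  jfrogUrl: https://jfrog.example.internal       # placeholder URL
  node:
    id: node-1
  security:
    joinKey: <join-key>
    masterKey: <master-key>
  database:
    type: postgresql
    driver: org.postgresql.Driver                # placeholder driver name
    url: jdbc:postgresql://db-host:5432/artifactory
    username: artifactory
    password: <password>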
+KEY_ADD_TO_CLUSTER="installer.ha" +VALID_VALUES_ADD_TO_CLUSTER="$FLAGS_Y_N" + +MESSAGE_POSTGRES_INSTALL="The installer can install a $POSTGRES_LABEL database, or you can connect to an existing compatible $POSTGRES_LABEL database\n(compatible databases: https://www.jfrog.com/confluence/display/JFROG/System+Requirements#SystemRequirements-RequirementsMatrix)" +PROMPT_POSTGRES_INSTALL="Do you want to install $POSTGRES_LABEL?" +KEY_POSTGRES_INSTALL="installer.install_postgresql" +VALID_VALUES_POSTGRES_INSTALL="$FLAGS_Y_N" + +# Postgres connection details +RPM_DEB_POSTGRES_HOME_DEFAULT="/var/opt/jfrog/postgres" +RPM_DEB_MESSAGE_STANDALONE_POSTGRES_DATA="$POSTGRES_LABEL home will have data and its configuration" +RPM_DEB_PROMPT_STANDALONE_POSTGRES_DATA="Type desired $POSTGRES_LABEL home location" +RPM_DEB_KEY_STANDALONE_POSTGRES_DATA="installer.postgresql.home" + +MESSAGE_DATABASE_URL="Provide the database connection details" +PROMPT_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://:/artifactory" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://:/mission_control?sslmode=disable" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://:/distribution?sslmode=disable" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://:/xraydb?sslmode=disable" + ;; + esac + if [ -z "$databaseURlExample" ]; then + echo -n "$POSTGRES_LABEL URL" # For consistency with username and password + return + fi + echo -n "$POSTGRES_LABEL url. Example: [$databaseURlExample]" +} +REGEX_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://.*/artifactory.*" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://.*/mission_control.*" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://.*/distribution.*" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://.*/xraydb.*" + ;; + esac + echo -n "^$databaseURlExample\$" +} +ERROR_MESSAGE_DATABASE_URL="Invalid $POSTGRES_LABEL URL" +KEY_DATABASE_URL="$SYS_KEY_SHARED_DATABASE_URL" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_USERNAME="$POSTGRES_LABEL username" +KEY_DATABASE_USERNAME="$SYS_KEY_SHARED_DATABASE_USERNAME" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_PASSWORD="$POSTGRES_LABEL password" +KEY_DATABASE_PASSWORD="$SYS_KEY_SHARED_DATABASE_PASSWORD" +IS_SENSITIVE_DATABASE_PASSWORD="$FLAG_Y" + +MESSAGE_STANDALONE_ELASTICSEARCH_INSTALL="The installer can install a $ELASTICSEARCH_LABEL database or you can connect to an existing compatible $ELASTICSEARCH_LABEL database" +PROMPT_STANDALONE_ELASTICSEARCH_INSTALL="Do you want to install $ELASTICSEARCH_LABEL?" 
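PROMPT_DATABASE_URL and REGEX_DATABASE_URL above vary by product, so the accepted shared.database.url shapes differ per installer. Summarised as a comment block (host and port are placeholders):

# shared.database.url examples accepted by REGEX_DATABASE_URL:
#   Artifactory:     jdbc:postgresql://db-host:5432/artifactory
#   Mission Control: postgresql://db-host:5432/mission_control?sslmode=disable
#   Distribution:    jdbc:postgresql://db-host:5432/distribution?sslmode=disable
#   Xray:            postgres://db-host:5432/xraydb?sslmode=disable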
+KEY_STANDALONE_ELASTICSEARCH_INSTALL="installer.install_elasticsearch" +VALID_VALUES_STANDALONE_ELASTICSEARCH_INSTALL="$FLAGS_Y_N" + +# Elasticsearch connection details +MESSAGE_ELASTICSEARCH_DETAILS="Provide the $ELASTICSEARCH_LABEL connection details" +PROMPT_ELASTICSEARCH_URL="$ELASTICSEARCH_LABEL URL" +KEY_ELASTICSEARCH_URL="$SYS_KEY_SHARED_ELASTICSEARCH_URL" + +PROMPT_ELASTICSEARCH_USERNAME="$ELASTICSEARCH_LABEL username" +KEY_ELASTICSEARCH_USERNAME="$SYS_KEY_SHARED_ELASTICSEARCH_USERNAME" + +PROMPT_ELASTICSEARCH_PASSWORD="$ELASTICSEARCH_LABEL password" +KEY_ELASTICSEARCH_PASSWORD="$SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD" +IS_SENSITIVE_ELASTICSEARCH_PASSWORD="$FLAG_Y" + +# Cluster related questions +MESSAGE_CLUSTER_MASTER_KEY="Provide the cluster's master key. It can be found in the data directory of the first node under /etc/security/master.key" +PROMPT_CLUSTER_MASTER_KEY="Master Key" +KEY_CLUSTER_MASTER_KEY="$SYS_KEY_SHARED_SECURITY_MASTERKEY" +IS_SENSITIVE_CLUSTER_MASTER_KEY="$FLAG_Y" + +MESSAGE_JOIN_KEY="The Join key is the secret key used to establish trust between services in the JFrog Platform.\n(You can copy the Join Key from Admin > Security > Settings)" +PROMPT_JOIN_KEY="Join Key" +KEY_JOIN_KEY="$SYS_KEY_SHARED_SECURITY_JOINKEY" +IS_SENSITIVE_JOIN_KEY="$FLAG_Y" +REGEX_JOIN_KEY="^[a-zA-Z0-9]{16,}\$" +ERROR_MESSAGE_JOIN_KEY="Invalid Join Key" + +# Rabbitmq related cluster information +MESSAGE_RABBITMQ_ACTIVE_NODE_NAME="Provide an active ${RABBITMQ_LABEL} node name. Run the command [ hostname -s ] on any of the existing nodes in the product cluster to get this" +PROMPT_RABBITMQ_ACTIVE_NODE_NAME="${RABBITMQ_LABEL} active node name" +KEY_RABBITMQ_ACTIVE_NODE_NAME="$SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME" + +# Rabbitmq related cluster information (necessary only for docker-compose) +PROMPT_RABBITMQ_ACTIVE_NODE_IP="${RABBITMQ_LABEL} active node ip" +KEY_RABBITMQ_ACTIVE_NODE_IP="$SYS_KEY_RABBITMQ_ACTIVE_NODE_IP" + +MESSAGE_JFROGURL(){ + echo -e "The JFrog URL allows ${PRODUCT_NAME} to connect to a JFrog Platform Instance.\n(You can copy the JFrog URL from Admin > Security > Settings)" +} +PROMPT_JFROGURL="JFrog URL" +KEY_JFROGURL="$SYS_KEY_SHARED_JFROGURL" +REGEX_JFROGURL="^https?://.*:{0,}[0-9]{0,4}\$" +ERROR_MESSAGE_JFROGURL="Invalid JFrog URL" + + +# Set this to FLAG_Y on upgrade +IS_UPGRADE="${FLAG_N}" + +# This belongs in JFMC but is the ONLY one that needs it so keeping it here for now. Can be made into a method and overridden if necessary +MESSAGE_MULTIPLE_PG_SCHEME="Please setup $POSTGRES_LABEL with schema as described in https://www.jfrog.com/confluence/display/JFROG/Installing+Mission+Control" + +_getMethodOutputOrVariableValue() { + unset EFFECTIVE_MESSAGE + local keyToSearch=$1 + local effectiveMessage= + local result="0" + # logSilly "Searching for method: [$keyToSearch]" + LC_ALL=C type "$keyToSearch" > /dev/null 2>&1 || result="$?" + if [[ "$result" == "0" ]]; then + # logSilly "Found method for [$keyToSearch]" + EFFECTIVE_MESSAGE="$($keyToSearch)" + return + fi + eval EFFECTIVE_MESSAGE=\${$keyToSearch} + if [ ! 
-z "$EFFECTIVE_MESSAGE" ]; then + return + fi + # logSilly "Didn't find method or variable for [$keyToSearch]" +} + + +# REF https://misc.flogisoft.com/bash/tip_colors_and_formatting +cClear="\e[0m" +cBlue="\e[38;5;69m" +cRedDull="\e[1;31m" +cYellow="\e[1;33m" +cRedBright="\e[38;5;197m" +cBold="\e[1m" + + +_loggerGetModeRaw() { + local MODE="$1" + case $MODE in + INFO) + printf "" + ;; + DEBUG) + printf "%s" "[${MODE}] " + ;; + WARN) + printf "${cRedDull}%s%s${cClear}" "[" "${MODE}" "] " + ;; + ERROR) + printf "${cRedBright}%s%s${cClear}" "[" "${MODE}" "] " + ;; + esac +} + + +_loggerGetMode() { + local MODE="$1" + case $MODE in + INFO) + printf "${cBlue}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + DEBUG) + printf "%-7s" "[${MODE}]" + ;; + WARN) + printf "${cRedDull}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + ERROR) + printf "${cRedBright}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + esac +} + +# Capitalises the first letter of the message +_loggerGetMessage() { + local originalMessage="$*" + local firstChar=$(echo "${originalMessage:0:1}" | awk '{ print toupper($0) }') + local resetOfMessage="${originalMessage:1}" + echo "$firstChar$resetOfMessage" +} + +# The spec also says content should be left-trimmed but this is not necessary in our case. We don't reach the limit. +_loggerGetStackTrace() { + printf "%s%-30s%s" "[" "$1:$2" "]" +} + +_loggerGetThread() { + printf "%s" "[main]" +} + +_loggerGetServiceType() { + printf "%s%-5s%s" "[" "shell" "]" +} + +#Trace ID is not applicable to scripts +_loggerGetTraceID() { + printf "%s" "[]" +} + +logRaw() { + echo "" + printf "$1" + echo "" +} + +logBold(){ + echo "" + printf "${cBold}$1${cClear}" + echo "" +} + +# The date binary works differently based on whether it is GNU/BSD +is_date_supported=0 +date --version > /dev/null 2>&1 || is_date_supported=1 +IS_GNU=$(echo $is_date_supported) + +_loggerGetTimestamp() { + if [ "${IS_GNU}" == "0" ]; then + echo -n $(date -u +%FT%T.%3NZ) + else + echo -n $(date -u +%FT%T.000Z) + fi +} + +# https://www.shellscript.sh/tips/spinner/ +_spin() +{ + spinner="/|\\-/|\\-" + while : + do + for i in `seq 0 7` + do + echo -n "${spinner:$i:1}" + echo -en "\010" + sleep 1 + done + done +} + +showSpinner() { + # Start the Spinner: + _spin & + # Make a note of its Process ID (PID): + SPIN_PID=$! + # Kill the spinner on any signal, including our own exit. + trap "kill -9 $SPIN_PID" `seq 0 15` &> /dev/null || return 0 +} + +stopSpinner() { + local occurrences=$(ps -ef | grep -wc "${SPIN_PID}") + let "occurrences+=0" + # validate that it is present (2 since this search itself will show up in the results) + if [ $occurrences -gt 1 ]; then + kill -9 $SPIN_PID &>/dev/null || return 0 + wait $SPIN_ID &>/dev/null + fi +} + +_getEffectiveMessage(){ + local MESSAGE="$1" + local MODE=${2-"INFO"} + + if [ -z "$CONTEXT" ]; then + CONTEXT=$(caller) + fi + + _EFFECTIVE_MESSAGE= + if [ -z "$LOG_BEHAVIOR_ADD_META" ]; then + _EFFECTIVE_MESSAGE="$(_loggerGetModeRaw $MODE)$(_loggerGetMessage $MESSAGE)" + else + local SERVICE_TYPE="script" + local TRACE_ID="" + local THREAD="main" + + local CONTEXT_LINE=$(echo "$CONTEXT" | awk '{print $1}') + local CONTEXT_FILE=$(echo "$CONTEXT" | awk -F"/" '{print $NF}') + + _EFFECTIVE_MESSAGE="$(_loggerGetTimestamp) $(_loggerGetServiceType) $(_loggerGetMode $MODE) $(_loggerGetTraceID) $(_loggerGetStackTrace $CONTEXT_FILE $CONTEXT_LINE) $(_loggerGetThread) - $(_loggerGetMessage $MESSAGE)" + fi + CONTEXT= +} + +# Important - don't call any log method from this method. Will become an infinite loop. 
Use echo to debug +_logToFile() { + local MODE=${1-"INFO"} + local targetFile="$LOG_BEHAVIOR_ADD_REDIRECTION" + # IF the file isn't passed, abort + if [ -z "$targetFile" ]; then + return + fi + # IF this is not being run in verbose mode and mode is debug or lower, abort + if [ "${VERBOSE_MODE}" != "$FLAG_Y" ] && [ "${VERBOSE_MODE}" != "true" ] && [ "${VERBOSE_MODE}" != "debug" ]; then + if [ "$MODE" == "DEBUG" ] || [ "$MODE" == "SILLY" ]; then + return + fi + fi + + # Create the file if it doesn't exist + if [ ! -f "${targetFile}" ]; then + return + # touch $targetFile > /dev/null 2>&1 || true + fi + # # Make it readable + # chmod 640 $targetFile > /dev/null 2>&1 || true + + # Log contents + printf "%s\n" "$_EFFECTIVE_MESSAGE" >> "$targetFile" || true +} + +logger() { + if [ "$LOG_BEHAVIOR_ADD_NEW_LINE" == "$FLAG_Y" ]; then + echo "" + fi + _getEffectiveMessage "$@" + local MODE=${2-"INFO"} + printf "%s\n" "$_EFFECTIVE_MESSAGE" + _logToFile "$MODE" +} + +logDebug(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "$FLAG_Y" ] || [ "${VERBOSE_MODE}" == "true" ] || [ "${VERBOSE_MODE}" == "debug" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logSilly(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "silly" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logError() { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= +} + +errorExit () { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= + exit 1 +} + +warn () { + CONTEXT=$(caller) + logger "$1" "WARN" + CONTEXT= +} + +note () { + CONTEXT=$(caller) + logger "$1" "NOTE" + CONTEXT= +} + +bannerStart() { + title=$1 + echo + echo -e "\033[1m${title}\033[0m" + echo +} + +bannerSection() { + title=$1 + echo + echo -e "******************************** ${title} ********************************" + echo +} + +bannerSubSection() { + title=$1 + echo + echo -e "************** ${title} *******************" + echo +} + +bannerMessge() { + title=$1 + echo + echo -e "********************************" + echo -e "${title}" + echo -e "********************************" + echo +} + +setRed () { + local input="$1" + echo -e \\033[31m${input}\\033[0m +} +setGreen () { + local input="$1" + echo -e \\033[32m${input}\\033[0m +} +setYellow () { + local input="$1" + echo -e \\033[33m${input}\\033[0m +} + +logger_addLinebreak () { + echo -e "---\n" +} + +bannerImportant() { + title=$1 + local bold="\033[1m" + local noColour="\033[0m" + echo + echo -e "${bold}######################################## IMPORTANT ########################################${noColour}" + echo -e "${bold}${title}${noColour}" + echo -e "${bold}###########################################################################################${noColour}" + echo +} + +bannerEnd() { + #TODO pass a title and calculate length dynamically so that start and end look alike + echo + echo "*****************************************************************************" + echo +} + +banner() { + title=$1 + content=$2 + bannerStart "${title}" + echo -e "$content" +} + +# The logic below helps us redirect content we'd normally hide to the log file. + # + # We have several commands which clutter the console with output and so use + # `cmd > /dev/null` - this redirects the command's output to null. + # + # However, the information we just hid maybe useful for support. 
Using the code pattern + # `cmd >&6` (instead of `cmd> >/dev/null` ), the command's output is hidden from the console + # but redirected to the installation log file + # + +#Default value of 6 is just null +exec 6>>/dev/null +redirectLogsToFile() { + echo "" + # local file=$1 + + # [ ! -z "${file}" ] || return 0 + + # local logDir=$(dirname "$file") + + # if [ ! -f "${file}" ]; then + # [ -d "${logDir}" ] || mkdir -p ${logDir} || \ + # ( echo "WARNING : Could not create parent directory (${logDir}) to redirect console log : ${file}" ; return 0 ) + # fi + + # #6 now points to the log file + # exec 6>>${file} + # #reference https://unix.stackexchange.com/questions/145651/using-exec-and-tee-to-redirect-logs-to-stdout-and-a-log-file-in-the-same-time + # exec 2>&1 > >(tee -a "${file}") +} + +# Check if a give key contains any sensitive string as part of it +# Based on the result, the caller can decide its value can be displayed or not +# Sample usage : isKeySensitive "${key}" && displayValue="******" || displayValue=${value} +isKeySensitive(){ + local key=$1 + local sensitiveKeys="password|secret|key|token" + + if [ -z "${key}" ]; then + return 1 + else + local lowercaseKey=$(echo "${key}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + [[ "${lowercaseKey}" =~ ${sensitiveKeys} ]] && return 0 || return 1 + fi +} + +getPrintableValueOfKey(){ + local displayValue= + local key="$1" + if [ -z "$key" ]; then + # This is actually an incorrect usage of this method but any logging will cause unexpected content in the caller + echo -n "" + return + fi + + local value="$2" + isKeySensitive "${key}" && displayValue="$SENSITIVE_KEY_VALUE" || displayValue="${value}" + echo -n $displayValue +} + +_createConsoleLog(){ + if [ -z "${JF_PRODUCT_HOME}" ]; then + return + fi + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + mkdir -p "${JF_PRODUCT_HOME}/var/log" || true + if [ ! -f ${targetFile} ]; then + touch $targetFile > /dev/null 2>&1 || true + fi + chmod 640 $targetFile > /dev/null 2>&1 || true +} + +# Output from application's logs are piped to this method. It checks a configuration variable to determine if content should be logged to +# the common console.log file +redirectServiceLogsToFile() { + + local result="0" + # check if the function getSystemValue exists + LC_ALL=C type getSystemValue > /dev/null 2>&1 || result="$?" + if [[ "$result" != "0" ]]; then + warn "Couldn't find the systemYamlHelper. Skipping log redirection" + return 0 + fi + + getSystemValue "shared.consoleLog" "NOT_SET" + if [[ "${YAML_VALUE}" == "false" ]]; then + logger "Redirection is set to false. Skipping log redirection" + return 0; + fi + + if [ -z "${JF_PRODUCT_HOME}" ] || [ "${JF_PRODUCT_HOME}" == "" ]; then + warn "JF_PRODUCT_HOME is unavailable. 
Skipping log redirection" + return 0 + fi + + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + + _createConsoleLog + + while read -r line; do + printf '%s\n' "${line}" >> $targetFile || return 0 # Don't want to log anything - might clutter the screen + done +} + +## Display environment variables starting with JF_ along with its value +## Value of sensitive keys will be displayed as "******" +## +## Sample Display : +## +## ======================== +## JF Environment variables +## ======================== +## +## JF_SHARED_NODE_ID : locahost +## JF_SHARED_JOINKEY : ****** +## +## +displayEnv() { + local JFEnv=$(printenv | grep ^JF_ 2>/dev/null) + local key= + local value= + + if [ -z "${JFEnv}" ]; then + return + fi + + cat << ENV_START_MESSAGE + +======================== +JF Environment variables +======================== +ENV_START_MESSAGE + + for entry in ${JFEnv}; do + key=$(echo "${entry}" | awk -F'=' '{print $1}') + value=$(echo "${entry}" | awk -F'=' '{print $2}') + + isKeySensitive "${key}" && value="******" || value=${value} + + printf "\n%-35s%s" "${key}" " : ${value}" + done + echo; +} + +_addLogRotateConfiguration() { + logDebug "Method ${FUNCNAME[0]}" + # mandatory inputs + local confFile="$1" + local logFile="$2" + + # Method available in _ioOperations.sh + LC_ALL=C type io_setYQPath > /dev/null 2>&1 || return 1 + + io_setYQPath + + # Method available in _systemYamlHelper.sh + LC_ALL=C type getSystemValue > /dev/null 2>&1 || return 1 + + local frequency="daily" + local archiveFolder="archived" + + local compressLogFiles= + getSystemValue "shared.logging.rotation.compress" "true" + if [[ "${YAML_VALUE}" == "true" ]]; then + compressLogFiles="compress" + fi + + getSystemValue "shared.logging.rotation.maxFiles" "10" + local noOfBackupFiles="${YAML_VALUE}" + + getSystemValue "shared.logging.rotation.maxSizeMb" "25" + local sizeOfFile="${YAML_VALUE}M" + + logDebug "Adding logrotate configuration for [$logFile] to [$confFile]" + + # Add configuration to file + local confContent=$(cat << LOGROTATECONF +$logFile { + $frequency + missingok + rotate $noOfBackupFiles + $compressLogFiles + notifempty + olddir $archiveFolder + dateext + extension .log + dateformat -%Y-%m-%d + size ${sizeOfFile} +} +LOGROTATECONF +) + echo "${confContent}" > ${confFile} || return 1 +} + +_operationIsBySameUser() { + local targetUser="$1" + local currentUserID=$(id -u) + local currentUserName=$(id -un) + + if [ $currentUserID == $targetUser ] || [ $currentUserName == $targetUser ]; then + echo -n "yes" + else + echo -n "no" + fi +} + +_addCronJobForLogrotate() { + logDebug "Method ${FUNCNAME[0]}" + + # Abort if logrotate is not available + [ "$(io_commandExists 'crontab')" != "yes" ] && warn "cron is not available" && return 1 + + # mandatory inputs + local productHome="$1" + local confFile="$2" + local cronJobOwner="$3" + + # We want to use our binary if possible. It may be more recent than the one in the OS + local logrotateBinary="$productHome/app/third-party/logrotate/logrotate" + + if [ ! -f "$logrotateBinary" ]; then + logrotateBinary="logrotate" + [ "$(io_commandExists 'logrotate')" != "yes" ] && warn "logrotate is not available" && return 1 + fi + local cmd="$logrotateBinary ${confFile} --state $productHome/var/etc/logrotate/logrotate-state" #--verbose + + id -u $cronJobOwner > /dev/null 2>&1 || { warn "User $cronJobOwner does not exist. 
Aborting logrotate configuration" && return 1; } + + # Remove the existing line + removeLogRotation "$productHome" "$cronJobOwner" || true + + # Run logrotate daily at 23:55 hours + local cronInterval="55 23 * * * $cmd" + + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + # If this is standalone mode, we cannot use -u - the user running this process may not have the necessary privileges + if [ "$standaloneMode" == "no" ]; then + (crontab -l -u $cronJobOwner 2>/dev/null; echo "$cronInterval") | crontab -u $cronJobOwner - + else + (crontab -l 2>/dev/null; echo "$cronInterval") | crontab - + fi +} + +## Configure logrotate for a product +## Failure conditions: +## If logrotation could not be setup for some reason +## Parameters: +## $1: The product name +## $2: The product home +## Depends on global: none +## Updates global: none +## Returns: NA + +configureLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + + # mandatory inputs + local productName="$1" + if [ -z $productName ]; then + warn "Incorrect usage. A product name is necessary for configuring log rotation" && return 1 + fi + + local productHome="$2" + if [ -z $productHome ]; then + warn "Incorrect usage. A product home folder is necessary for configuring log rotation" && return 1 + fi + + local logFile="${productHome}/var/log/console.log" + if [[ $(uname) == "Darwin" ]]; then + logger "Log rotation for [$logFile] has not been configured. Please setup manually" + return 0 + fi + + local userID="$3" + if [ -z $userID ]; then + warn "Incorrect usage. A userID is necessary for configuring log rotation" && return 1 + fi + + local groupID=${4:-$userID} + local logConfigOwner=${5:-$userID} + + logDebug "Configuring log rotation as user [$userID], group [$groupID], effective cron User [$logConfigOwner]" + + local errorMessage="Could not configure logrotate. Please configure log rotation of the file: [$logFile] manually" + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + # TODO move to recursive method + createDir "${productHome}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log/archived" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + + # TODO move to recursive method + createDir "${productHome}/var/etc" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/etc/logrotate" "$logConfigOwner" || { warn "${errorMessage}" && return 1; } + + # conf file should be owned by the user running the script + createFile "${confFile}" "${logConfigOwner}" || { warn "Could not create configuration file [$confFile]" return 1; } + + _addLogRotateConfiguration "${confFile}" "${logFile}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + _addCronJobForLogrotate "${productHome}" "${confFile}" "${logConfigOwner}" || { warn "${errorMessage}" && return 1; } +} + +_pauseExecution() { + if [ "${VERBOSE_MODE}" == "debug" ]; then + + local breakPoint="$1" + if [ ! 
-z "$breakPoint" ]; then + printf "${cBlue}Breakpoint${cClear} [$breakPoint] " + echo "" + fi + printf "${cBlue}Press enter once you are ready to continue${cClear}" + read -s choice + echo "" + fi +} + +# removeLogRotation "$productHome" "$cronJobOwner" || true +removeLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + if [[ $(uname) == "Darwin" ]]; then + logDebug "Not implemented for Darwin." + return 0 + fi + local productHome="$1" + local cronJobOwner="$2" + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + if [ "$standaloneMode" == "no" ]; then + crontab -l -u $cronJobOwner 2>/dev/null | grep -v "$confFile" | crontab -u $cronJobOwner - + else + crontab -l 2>/dev/null | grep -v "$confFile" | crontab - + fi +} + +# NOTE: This method does not check the configuration to see if redirection is necessary. +# This is intentional. If we don't redirect, tomcat logs might get redirected to a folder/file +# that does not exist, causing the service itself to not start +setupTomcatRedirection() { + logDebug "Method ${FUNCNAME[0]}" + local consoleLog="${JF_PRODUCT_HOME}/var/log/console.log" + _createConsoleLog + export CATALINA_OUT="${consoleLog}" +} + +setupScriptLogsRedirection() { + logDebug "Method ${FUNCNAME[0]}" + if [ -z "${JF_PRODUCT_HOME}" ]; then + logDebug "No JF_PRODUCT_HOME. Returning" + return + fi + # Create the console.log file if it is not already present + # _createConsoleLog || true + # # Ensure any logs (logger/logError/warn) also get redirected to the console.log + # # Using installer.log as a temparory fix. Please change this to console.log once INST-291 is fixed + export LOG_BEHAVIOR_ADD_REDIRECTION="${JF_PRODUCT_HOME}/var/log/console.log" + export LOG_BEHAVIOR_ADD_META="$FLAG_Y" +} + +# Returns Y if this method is run inside a container +isRunningInsideAContainer() { + if [ -f "/.dockerenv" ]; then + echo -n "$FLAG_Y" + else + echo -n "$FLAG_N" + fi +} + +POSTGRES_USER=999 +NGINX_USER=104 +NGINX_GROUP=107 +ES_USER=1000 +REDIS_USER=999 +MONGO_USER=999 +RABBITMQ_USER=999 +LOG_FILE_PERMISSION=640 +PID_FILE_PERMISSION=644 + +# Copy file +copyFile(){ + local source=$1 + local target=$2 + local mode=${3:-overwrite} + local enableVerbose=${4:-"${FLAG_N}"} + local verboseFlag="" + + if [ ! -z "${enableVerbose}" ] && [ "${enableVerbose}" == "${FLAG_Y}" ]; then + verboseFlag="-v" + fi + + if [[ ! 
( $source && $target ) ]]; then
+        warn "Source and target are mandatory to copy a file"
+        return 1
+    fi
+
+    if [[ -f "${target}" ]]; then
+        [[ "$mode" = "overwrite" ]] && ( cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}") || true
+    else
+        cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}"
+    fi
+}
+
+# Copy files recursively from given source directory to destination directory
+# This method will copy but will NOT overwrite
+# Destination will be created if it is not available
+copyFilesNoOverwrite(){
+    local src=$1
+    local dest=$2
+    local enableVerboseCopy="${3:-${FLAG_Y}}"
+
+    if [[ -z "${src}" || -z "${dest}" ]]; then
+        return
+    fi
+
+    if [ -d "${src}" ] && [ "$(ls -A ${src})" ]; then
+        local relativeFilePath=""
+        local targetFilePath=""
+
+        for file in $(find ${src} -type f 2>/dev/null) ; do
+            # Derive relative path and attach it to destination
+            # Example :
+            # src=/extra_config
+            # dest=/var/opt/jfrog/artifactory/etc
+            # file=/extra_config/config.xml
+            # relativeFilePath=config.xml
+            # targetFilePath=/var/opt/jfrog/artifactory/etc/config.xml
+            relativeFilePath=${file/${src}/}
+            targetFilePath=${dest}${relativeFilePath}
+
+            createDir "$(dirname "$targetFilePath")"
+            copyFile "${file}" "${targetFilePath}" "no_overwrite" "${enableVerboseCopy}"
+        done
+    fi
+}
+
+# TODO : WINDOWS ?
+# Check the max open files and open processes set on the system
+checkULimits () {
+    local minMaxOpenFiles=${1:-32000}
+    local minMaxOpenProcesses=${2:-1024}
+    local setValue=${3:-true}
+    local warningMsgForFiles=${4}
+    local warningMsgForProcesses=${5}
+
+    logger "Checking open files and processes limits"
+
+    local currentMaxOpenFiles=$(ulimit -n)
+    logger "Current max open files is $currentMaxOpenFiles"
+    if [ ${currentMaxOpenFiles} != "unlimited" ] && [ "$currentMaxOpenFiles" -lt "$minMaxOpenFiles" ]; then
+        if [ "${setValue}" ]; then
+            ulimit -n "${minMaxOpenFiles}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!"
+            [ -z "${warningMsgForFiles}" ] || warn "${warningMsgForFiles}"
+        else
+            errorExit "Max number of open files $currentMaxOpenFiles is too low. Cannot run the application!"
+        fi
+    fi
+
+    local currentMaxOpenProcesses=$(ulimit -u)
+    logger "Current max open processes is $currentMaxOpenProcesses"
+    if [ "$currentMaxOpenProcesses" != "unlimited" ] && [ "$currentMaxOpenProcesses" -lt "$minMaxOpenProcesses" ]; then
+        if [ "${setValue}" ]; then
+            ulimit -u "${minMaxOpenProcesses}" >/dev/null 2>&1 || warn "Max number of open processes $currentMaxOpenProcesses is low!"
+            [ -z "${warningMsgForProcesses}" ] || warn "${warningMsgForProcesses}"
+        else
+            errorExit "Max number of open processes $currentMaxOpenProcesses is too low. Cannot run the application!"
+        fi
+    fi
+}
+
+createDirs() {
+    local appDataDir=$1
+    local serviceName=$2
+    local folders="backup bootstrap data etc logs work"
+
+    [ -z "${appDataDir}" ] && errorExit "An application directory is mandatory to create its data structure" || true
+    [ -z "${serviceName}" ] && errorExit "A service name is mandatory to create service data structure" || true
+
+    for folder in ${folders}
+    do
+        folder=${appDataDir}/${folder}/${serviceName}
+        if [ !
-d "${folder}" ]; then + logger "Creating folder : ${folder}" + mkdir -p "${folder}" || errorExit "Failed to create ${folder}" + fi + done +} + + +testReadWritePermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local test_file=${dir_to_check}/test-permissions + + # Write file + if echo test > ${test_file} 1> /dev/null 2>&1; then + # Write succeeded. Testing read... + if cat ${test_file} > /dev/null; then + rm -f ${test_file} + else + error=true + fi + else + error=true + fi + + if [ ${error} == true ]; then + return 1 + else + return 0 + fi +} + +# Test directory has read/write permissions for current user +testDirectoryPermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local u_id=$(id -u) + local id_str="id ${u_id}" + + logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}" + + if ! testReadWritePermissions ${dir_to_check}; then + error=true + fi + + if [ "${error}" == true ]; then + local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check}) + logger "###########################################################" + logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}" + logger "${stat_data}" + logger "Mounted directory must have read/write permissions for user ${id_str}" + logger "###########################################################" + errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}" + fi + logger "Permissions for ${dir_to_check} are good" +} + +# Utility method to create a directory path recursively with chown feature as +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: Root directory from where the path can be created +## $2: List of recursive child directories seperated by space +## $3: user who should own the directory. Optional +## $4: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA +# +# Usage: +# createRecursiveDir "/opt/jfrog/product/var" "bootstrap tomcat lib" "user_name" "group_name" +createRecursiveDir(){ + local rootDir=$1 + local pathDirs=$2 + local user=$3 + local group=${4:-${user}} + local fullPath= + + [ ! -z "${rootDir}" ] || return 0 + + createDir "${rootDir}" "${user}" "${group}" + + [ ! -z "${pathDirs}" ] || return 0 + + fullPath=${rootDir} + + for dir in ${pathDirs}; do + fullPath=${fullPath}/${dir} + createDir "${fullPath}" "${user}" "${group}" + done +} + +# Utility method to create a directory +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: directory to create +## $2: user who should own the directory. Optional +## $3: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA + +createDir(){ + local dirName="$1" + local printMessage=no + logSilly "Method ${FUNCNAME[0]} invoked with [$dirName]" + [ -z "${dirName}" ] && return + + logDebug "Attempting to create ${dirName}" + mkdir -p "${dirName}" || errorExit "Unable to create directory: [${dirName}]" + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + # Earlier, this line would have returned 1 if it failed. Now it just warns. + # This is intentional. 
Earlier, this line would NOT be reached if the folder already existed. + # Since it will always come to this line and the script may be running as a non-root user, this method will just warn if + # setting permissions fails (so as to not affect any existing flows) + io_setOwnershipNonRecursive "$dirName" "$userID" "$groupID" || warn "Could not set owner of [$dirName] to [$userID:$groupID]" + fi + # logging message to print created dir with user and group + local logMessage=${4:-$printMessage} + if [[ "${logMessage}" == "yes" ]]; then + logger "Successfully created directory [${dirName}]. Owner: [${userID}:${groupID}]" + fi +} + +removeSoftLinkAndCreateDir () { + local dirName="$1" + local userID="$2" + local groupID="$3" + local logMessage="$4" + removeSoftLink "${dirName}" + createDir "${dirName}" "${userID}" "${groupID}" "${logMessage}" +} + +# Utility method to remove a soft link +removeSoftLink () { + local dirName="$1" + if [[ -L "${dirName}" ]]; then + targetLink=$(readlink -f "${dirName}") + logger "Removing the symlink [${dirName}] pointing to [${targetLink}]" + rm -f "${dirName}" + fi +} + +# Check Directory exist in the path +checkDirExists () { + local directoryPath="$1" + + [[ -d "${directoryPath}" ]] && echo -n "true" || echo -n "false" +} + + +# Utility method to create a file +# Failure conditions: +# Parameters: +## $1: file to create +# Depends on global: none +# Updates global: none +# Returns: NA + +createFile(){ + local fileName="$1" + logSilly "Method ${FUNCNAME[0]} [$fileName]" + [ -f "${fileName}" ] && return 0 + touch "${fileName}" || return 1 + + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + io_setOwnership "$fileName" "$userID" "$groupID" || return 1 + fi +} + +# Check File exist in the filePath +# IMPORTANT- DON'T ADD LOGGING to this method +checkFileExists () { + local filePath="$1" + + [[ -f "${filePath}" ]] && echo -n "true" || echo -n "false" +} + +# Check for directories contains any (files or sub directories) +# IMPORTANT- DON'T ADD LOGGING to this method +checkDirContents () { + local directoryPath="$1" + if [[ "$(ls -1 "${directoryPath}" | wc -l)" -gt 0 ]]; then + echo -n "true" + else + echo -n "false" + fi +} + +# Check contents exist in directory +# IMPORTANT- DON'T ADD LOGGING to this method +checkContentExists () { + local source="$1" + + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + echo -n "false" + else + echo -n "true" + fi +} + +# Resolve the variable +# IMPORTANT- DON'T ADD LOGGING to this method +evalVariable () { + local output="$1" + local input="$2" + + eval "${output}"=\${"${input}"} + eval echo \${"${output}"} +} + +# Usage: if [ "$(io_commandExists 'curl')" == "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_commandExists() { + local commandToExecute="$1" + hash "${commandToExecute}" 2>/dev/null + local rt=$? 
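# Note: 'hash' searches PATH for the command and returns non-zero when it is not found,
# so rt==0 is treated here as "command exists"; this avoids spawning an external 'which'.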
+ if [ "$rt" == 0 ]; then echo -n "yes"; else echo -n "no"; fi +} + +# Usage: if [ "$(io_curlExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_curlExists() { + io_commandExists "curl" +} + + +io_hasMatch() { + logSilly "Method ${FUNCNAME[0]}" + local result=0 + logDebug "Executing [echo \"$1\" | grep \"$2\" >/dev/null 2>&1]" + echo "$1" | grep "$2" >/dev/null 2>&1 || result=1 + return $result +} + +# Utility method to check if the string passed (usually a connection url) corresponds to this machine itself +# Failure conditions: None +# Parameters: +## $1: string to check against +# Depends on global: none +# Updates global: IS_LOCALHOST with value "yes/no" +# Returns: NA + +io_getIsLocalhost() { + logSilly "Method ${FUNCNAME[0]}" + IS_LOCALHOST="$FLAG_N" + local inputString="$1" + logDebug "Parsing [$inputString] to check if we are dealing with this machine itself" + + io_hasMatch "$inputString" "localhost" && { + logDebug "Found localhost. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for localhost" + + local hostIP=$(io_getPublicHostIP) + io_hasMatch "$inputString" "$hostIP" && { + logDebug "Found $hostIP. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostIP" + + local hostID=$(io_getPublicHostID) + io_hasMatch "$inputString" "$hostID" && { + logDebug "Found $hostID. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostID" + + local hostName=$(io_getPublicHostName) + io_hasMatch "$inputString" "$hostName" && { + logDebug "Found $hostName. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostName" + +} + +# Usage: if [ "$(io_tarExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_tarExists() { + io_commandExists "tar" +} + +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostIP() { + local OS_TYPE=$(uname) + local publicHostIP= + if [ "${OS_TYPE}" == "Darwin" ]; then + ipStatus=$(ifconfig en0 | grep "status" | awk '{print$2}') + if [ "${ipStatus}" == "active" ]; then + publicHostIP=$(ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}') + else + errorExit "Host IP could not be resolved!" + fi + elif [ "${OS_TYPE}" == "Linux" ]; then + publicHostIP=$(hostname -i 2>/dev/null || echo "127.0.0.1") + fi + publicHostIP=$(echo "${publicHostIP}" | awk '{print $1}') + echo -n "${publicHostIP}" +} + +# Will return the short host name (up to the first dot) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostName() { + echo -n "$(hostname -s)" +} + +# Will return the full host name (use this as much as possible) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostID() { + echo -n "$(hostname)" +} + +# Utility method to backup a file +# Failure conditions: NA +# Parameters: filePath +# Depends on global: none, +# Updates global: none +# Returns: NA +io_backupFile() { + logSilly "Method ${FUNCNAME[0]}" + fileName="$1" + if [ ! 
-f "${filePath}" ]; then + logDebug "No file: [${filePath}] to backup" + return + fi + dateTime=$(date +"%Y-%m-%d-%H-%M-%S") + targetFileName="${fileName}.backup.${dateTime}" + yes | \cp -f "$fileName" "${targetFileName}" + logger "File [${fileName}] backedup as [${targetFileName}]" +} + +# Reference https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash/4025065#4025065 +is_number() { + case "$BASH_VERSION" in + 3.1.*) + PATTERN='\^\[0-9\]+\$' + ;; + *) + PATTERN='^[0-9]+$' + ;; + esac + + [[ "$1" =~ $PATTERN ]] +} + +io_compareVersions() { + if [[ $# != 2 ]] + then + echo "Usage: min_version current minimum" + return + fi + + A="${1%%.*}" + B="${2%%.*}" + + if [[ "$A" != "$1" && "$B" != "$2" && "$A" == "$B" ]] + then + io_compareVersions "${1#*.}" "${2#*.}" + else + if is_number "$A" && is_number "$B" + then + if [[ "$A" -eq "$B" ]]; then + echo "0" + elif [[ "$A" -gt "$B" ]]; then + echo "1" + elif [[ "$A" -lt "$B" ]]; then + echo "-1" + fi + fi + fi +} + +# Reference https://stackoverflow.com/questions/369758/how-to-trim-whitespace-from-a-bash-variable +# Strip all leading and trailing spaces +# IMPORTANT- DON'T ADD LOGGING to this method +io_trim() { + local var="$1" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +# temporary function will be removing it ASAP +# search for string and replace text in file +replaceText_migration_hook () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + fi +} + +# search for string and replace text in file +replaceText () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + logDebug "Replaced [$regexString] with [$replaceText] in [$file]" + fi +} + +# search for string and prepend text in file +prependText () { + local regexString="$1" + local text="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + else + sed -i -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + fi +} + +# add text to beginning of the file +addText () { + local text="$1" + local file="$2" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + else + sed -i -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + fi +} + +io_replaceString () { + local value="$1" + local firstString="$2" + local secondString="$3" + local separator=${4:-"/"} + local updateValue= + if [[ 
$(uname) == "Darwin" ]]; then + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + else + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + fi + echo -n "${updateValue}" +} + +_findYQ() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + local parentDir="$1" + if [ -z "$parentDir" ]; then + return + fi + logDebug "Executing command [find "${parentDir}" -name third-party -type d]" + local yq=$(find "${parentDir}" -name third-party -type d) + if [ -d "${yq}/yq" ]; then + export YQ_PATH="${yq}/yq" + fi +} + + +io_setYQPath() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + if [ "$(io_commandExists 'yq')" == "yes" ]; then + return + fi + + if [ ! -z "${JF_PRODUCT_HOME}" ] && [ -d "${JF_PRODUCT_HOME}" ]; then + _findYQ "${JF_PRODUCT_HOME}" + fi + + if [ -z "${YQ_PATH}" ] && [ ! -z "${COMPOSE_HOME}" ] && [ -d "${COMPOSE_HOME}" ]; then + _findYQ "${COMPOSE_HOME}" + fi + # TODO We can remove this block after all the code is restructured. + if [ -z "${YQ_PATH}" ] && [ ! -z "${SCRIPT_HOME}" ] && [ -d "${SCRIPT_HOME}" ]; then + _findYQ "${SCRIPT_HOME}" + fi + +} + +io_getLinuxDistribution() { + LINUX_DISTRIBUTION= + + # Make sure running on Linux + [ $(uname -s) != "Linux" ] && return + + # Find out what Linux distribution we are on + + cat /etc/*-release | grep -i Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 6.x + cat /etc/issue.net | grep Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 7.x + cat /etc/*-release | grep -i centos >/dev/null 2>&1 && LINUX_DISTRIBUTION=CentOS && LINUX_DISTRIBUTION_VER="7" || true + + # OS 8.x + grep -q -i "release 8" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="8" || true + + # OS 7.x + grep -q -i "release 7" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="7" || true + + # OS 6.x + grep -q -i "release 6" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="6" || true + + cat /etc/*-release | grep -i Red | grep -i 'VERSION=7' >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat && LINUX_DISTRIBUTION_VER="7" || true + + cat /etc/*-release | grep -i debian >/dev/null 2>&1 && LINUX_DISTRIBUTION=Debian || true + + cat /etc/*-release | grep -i ubuntu >/dev/null 2>&1 && LINUX_DISTRIBUTION=Ubuntu || true +} + +## Utility method to check ownership of folders/files +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If file is not owned by the user & group +## Parameters: + ## user + ## group + ## folder to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac +io_checkOwner () { + logSilly "Method ${FUNCNAME[0]}" + local osType=$(uname) + + if [ "${osType}" != "Linux" ]; then + logDebug "Unsupported OS. Skipping check" + return 0 + fi + + local file_to_check=$1 + local user_id_to_check=$2 + + + if [ -z "$user_id_to_check" ] || [ -z "$file_to_check" ]; then + errorExit "Invalid invocation of method. 
Missing mandatory inputs" + fi + + local group_id_to_check=${3:-$user_id_to_check} + local check_user_name=${4:-"no"} + + logDebug "Checking permissions on [$file_to_check] for user [$user_id_to_check] & group [$group_id_to_check]" + + local stat= + + if [ "${check_user_name}" == "yes" ]; then + stat=( $(stat -Lc "%U %G" ${file_to_check}) ) + else + stat=( $(stat -Lc "%u %g" ${file_to_check}) ) + fi + + local user_id=${stat[0]} + local group_id=${stat[1]} + + if [[ "${user_id}" != "${user_id_to_check}" ]] || [[ "${group_id}" != "${group_id_to_check}" ]] ; then + logDebug "Ownership mismatch. [${file_to_check}] is not owned by [${user_id_to_check}:${group_id_to_check}]" + return 1 + else + return 0 + fi +} + +## Utility method to change ownership of a file/folder - NON recursive +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnershipNonRecursive() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown ${user}:${group} ${targetFile}]" + chown ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to change ownership of a file. +## IMPORTANT +## If being called on a folder, should ONLY be called for fresh folders or may cause performance issues +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnership() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown -R ${user}:${group} ${targetFile}]" + chown -R ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to create third party folder structure necessary for Postgres +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## POSTGRESQL_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createPostgresDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${POSTGRESQL_DATA_ROOT}" ] && return 0 + + logDebug "Property [${POSTGRESQL_DATA_ROOT}] exists. Proceeding" + + createDir "${POSTGRESQL_DATA_ROOT}/data" + io_setOwnership "${POSTGRESQL_DATA_ROOT}" "${POSTGRES_USER}" "${POSTGRES_USER}" || errorExit "Setting ownership of [${POSTGRESQL_DATA_ROOT}] to [${POSTGRES_USER}:${POSTGRES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Nginx +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## NGINX_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createNginxDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${NGINX_DATA_ROOT}" ] && return 0 + + logDebug "Property [${NGINX_DATA_ROOT}] exists. 
Proceeding" + + createDir "${NGINX_DATA_ROOT}" + io_setOwnership "${NGINX_DATA_ROOT}" "${NGINX_USER}" "${NGINX_GROUP}" || errorExit "Setting ownership of [${NGINX_DATA_ROOT}] to [${NGINX_USER}:${NGINX_GROUP}] failed" +} + +## Utility method to create third party folder structure necessary for ElasticSearch +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## ELASTIC_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createElasticSearchDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${ELASTIC_DATA_ROOT}" ] && return 0 + + logDebug "Property [${ELASTIC_DATA_ROOT}] exists. Proceeding" + + createDir "${ELASTIC_DATA_ROOT}/data" + io_setOwnership "${ELASTIC_DATA_ROOT}" "${ES_USER}" "${ES_USER}" || errorExit "Setting ownership of [${ELASTIC_DATA_ROOT}] to [${ES_USER}:${ES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Redis +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## REDIS_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRedisDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${REDIS_DATA_ROOT}" ] && return 0 + + logDebug "Property [${REDIS_DATA_ROOT}] exists. Proceeding" + + createDir "${REDIS_DATA_ROOT}" + io_setOwnership "${REDIS_DATA_ROOT}" "${REDIS_USER}" "${REDIS_USER}" || errorExit "Setting ownership of [${REDIS_DATA_ROOT}] to [${REDIS_USER}:${REDIS_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Mongo +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## MONGODB_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createMongoDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${MONGODB_DATA_ROOT}" ] && return 0 + + logDebug "Property [${MONGODB_DATA_ROOT}] exists. Proceeding" + + createDir "${MONGODB_DATA_ROOT}/logs" + createDir "${MONGODB_DATA_ROOT}/configdb" + createDir "${MONGODB_DATA_ROOT}/db" + io_setOwnership "${MONGODB_DATA_ROOT}" "${MONGO_USER}" "${MONGO_USER}" || errorExit "Setting ownership of [${MONGODB_DATA_ROOT}] to [${MONGO_USER}:${MONGO_USER}] failed" +} + +## Utility method to create third party folder structure necessary for RabbitMQ +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## RABBITMQ_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRabbitMQDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${RABBITMQ_DATA_ROOT}" ] && return 0 + + logDebug "Property [${RABBITMQ_DATA_ROOT}] exists. 
Proceeding" + + createDir "${RABBITMQ_DATA_ROOT}" + io_setOwnership "${RABBITMQ_DATA_ROOT}" "${RABBITMQ_USER}" "${RABBITMQ_USER}" || errorExit "Setting ownership of [${RABBITMQ_DATA_ROOT}] to [${RABBITMQ_USER}:${RABBITMQ_USER}] failed" +} + +# Add or replace a property in provided properties file +addOrReplaceProperty() { + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + local delimiter=${4:-"="} + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}\s*${delimiter}.*$" ${propertiesPath} > /dev/null 2>&1 + [ $? -ne 0 ] && echo -e "\n${propertyName}${delimiter}${propertyValue}" >> ${propertiesPath} + sed -i -e "s|^${propertyName}\s*${delimiter}.*$|${propertyName}${delimiter}${propertyValue}|g;" ${propertiesPath} +} + +# Set property only if its not set +io_setPropertyNoOverride(){ + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}:" ${propertiesPath} > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${propertyName}: ${propertyValue}" >> ${propertiesPath} || warn "Setting property ${propertyName}: ${propertyValue} in [ ${propertiesPath} ] failed" + else + logger "Skipping update of property : ${propertyName}" >&6 + fi +} + +# Add a line to a file if it doesn't already exist +addLine() { + local line_to_add=$1 + local target_file=$2 + logger "Trying to add line $1 to $2" >&6 2>&1 + cat "$target_file" | grep -F "$line_to_add" -wq >&6 2>&1 + if [ $? != 0 ]; then + logger "Line does not exist and will be added" >&6 2>&1 + echo $line_to_add >> $target_file || errorExit "Could not update $target_file" + fi +} + +# Utility method to check if a value (first paramter) exists in an array (2nd parameter) +# 1st parameter "value to find" +# 2nd parameter "The array to search in. Please pass a string with each value separated by space" +# Example: containsElement "y" "y Y n N" +containsElement () { + local searchElement=$1 + local searchArray=($2) + local found=1 + for elementInIndex in "${searchArray[@]}";do + if [[ $elementInIndex == $searchElement ]]; then + found=0 + fi + done + return $found +} + +# Utility method to get user's choice +# 1st parameter "what to ask the user" +# 2nd parameter "what choices to accept, separated by spaces" +# 3rd parameter "what is the default choice (to use if the user simply presses Enter)" +# Example 'getUserChoice "Are you feeling lucky? Punk!" "y n Y N" "y"' +getUserChoice(){ + configureLogOutput + read_timeout=${read_timeout:-0.5} + local choice="na" + local text_to_display=$1 + local choices=$2 + local default_choice=$3 + users_choice= + + until containsElement "$choice" "$choices"; do + echo "";echo ""; + sleep $read_timeout #This ensures correct placement of the question. 
+ read -p "$text_to_display :" choice + : ${choice:=$default_choice} + done + users_choice=$choice + echo -e "\n$text_to_display: $users_choice" >&6 + sleep $read_timeout #This ensures correct logging +} + +setFilePermission () { + local permission=$1 + local file=$2 + chmod "${permission}" "${file}" || warn "Setting permission ${permission} to file [ ${file} ] failed" +} + + +#setting required paths +setAppDir (){ + SCRIPT_DIR=$(dirname $0) + SCRIPT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + APP_DIR="`cd "${SCRIPT_HOME}";pwd`" +} + +ZIP_TYPE="zip" +COMPOSE_TYPE="compose" +HELM_TYPE="helm" +RPM_TYPE="rpm" +DEB_TYPE="debian" + +sourceScript () { + local file="$1" + + [ ! -z "${file}" ] || errorExit "target file is not passed to source a file" + + if [ ! -f "${file}" ]; then + errorExit "${file} file is not found" + else + source "${file}" || errorExit "Unable to source ${file}, please check if the user ${USER} has permissions to perform this action" + fi +} +# Source required helpers +initHelpers () { + local systemYamlHelper="${APP_DIR}/systemYamlHelper.sh" + local thirdPartyDir=$(find ${APP_DIR}/.. -name third-party -type d) + export YQ_PATH="${thirdPartyDir}/yq" + LIBXML2_PATH="${thirdPartyDir}/libxml2/bin/xmllint" + export LD_LIBRARY_PATH="${thirdPartyDir}/libxml2/lib" + sourceScript "${systemYamlHelper}" +} +# Check migration info yaml file available in the path +checkMigrationInfoYaml () { + + if [[ -f "${APP_DIR}/migrationHelmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationHelmInfo.yaml" + INSTALLER="${HELM_TYPE}" + elif [[ -f "${APP_DIR}/migrationZipInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationZipInfo.yaml" + INSTALLER="${ZIP_TYPE}" + elif [[ -f "${APP_DIR}/migrationRpmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationRpmInfo.yaml" + INSTALLER="${RPM_TYPE}" + elif [[ -f "${APP_DIR}/migrationDebInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationDebInfo.yaml" + INSTALLER="${DEB_TYPE}" + elif [[ -f "${APP_DIR}/migrationComposeInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationComposeInfo.yaml" + INSTALLER="${COMPOSE_TYPE}" + else + errorExit "File migration Info yaml does not exist in [${APP_DIR}]" + fi +} + +retrieveYamlValue () { + local yamlPath="$1" + local value="$2" + local output="$3" + local message="$4" + + [[ -z "${yamlPath}" ]] && errorExit "yamlPath is mandatory to get value from ${MIGRATION_SYSTEM_YAML_INFO}" + + getYamlValue "${yamlPath}" "${MIGRATION_SYSTEM_YAML_INFO}" "false" + value="${YAML_VALUE}" + if [[ -z "${value}" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "Empty value for ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + elif [[ "${output}" == "Skip" ]]; then + return + else + errorExit "${message}" + fi + fi +} + +checkEnv () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + # check Environment JF_PRODUCT_HOME is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_PRODUCT_HOME")" + if [[ -z "${NEW_DATA_DIR}" ]]; then + errorExit "Environment variable JF_PRODUCT_HOME is not set, this is required to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + getCustomDataDir_hook + NEW_DATA_DIR="${OLD_DATA_DIR}" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + else + # check Environment JF_ROOT_DATA_DIR is 
set before migration + OLD_DATA_DIR="$(evalVariable "OLD_DATA_DIR" "JF_ROOT_DATA_DIR")" + # check Environment JF_ROOT_DATA_DIR is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_ROOT_DATA_DIR")" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi + +} + +getDataDir () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}"|| "${INSTALLER}" == "${HELM_TYPE}" ]]; then + checkEnv + else + getCustomDataDir_hook + NEW_DATA_DIR="`cd "${APP_DIR}"/../../;pwd`" + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi +} + +# Retrieve Product name from MIGRATION_SYSTEM_YAML_INFO +getProduct () { + retrieveYamlValue "migration.product" "${YAML_VALUE}" "Fail" "Empty value under ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + PRODUCT="${YAML_VALUE}" + PRODUCT=$(echo "${PRODUCT}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + if [[ "${PRODUCT}" != "artifactory" && "${PRODUCT}" != "distribution" && "${PRODUCT}" != "xray" ]]; then + errorExit "migration.product in [${MIGRATION_SYSTEM_YAML_INFO}] is not correct, please set based on product as ARTIFACTORY or DISTRIBUTION" + fi + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + JF_USER="${PRODUCT}" + fi +} +# Compare product version with minProductVersion and maxProductVersion +migrateCheckVersion () { + local productVersion="$1" + local minProductVersion="$2" + local maxProductVersion="$3" + local productVersion618="6.18.0" + local unSupportedProductVersions7=("7.2.0 7.2.1") + + if [[ "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 1 ]]; then + logger "Migration not necessary. ${PRODUCT} is already ${productVersion}" + exit 11 + elif [[ "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 1 ]]; then + if [[ ("$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 1) && " ${unSupportedProductVersions7[@]} " =~ " ${CURRENT_VERSION} " ]]; then + touch /tmp/error; + errorExit "Current ${PRODUCT} version (${productVersion}) does not support migration to ${CURRENT_VERSION}" + else + bannerStart "Detected ${PRODUCT} ${productVersion}, initiating migration" + fi + else + logger "Current ${PRODUCT} ${productVersion} version is not supported for migration" + exit 1 + fi +} + +getProductVersion () { + local minProductVersion="$1" + local maxProductVersion="$2" + local newfilePath="$3" + local oldfilePath="$4" + local propertyInDocker="$5" + local property="$6" + local productVersion= + local status= + + if [[ "$INSTALLER" == "${COMPOSE_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + elif [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${propertyInDocker}" "${newfilePath}")" + status="fail" + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." 
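# A missing version marker file is treated as "nothing to migrate" rather than as an error,
# hence the exit status of 0 below.
# For reference, io_compareVersions (used by migrateCheckVersion above) prints 1, 0 or -1, e.g.
#   io_compareVersions "7.2.1"  "6.18.0"  -> 1
#   io_compareVersions "6.18.0" "6.18.0"  -> 0
#   io_compareVersions "6.17.0" "6.18.0"  -> -1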
+ exit 0 + fi + elif [[ "$INSTALLER" == "${HELM_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + else + productVersion="${CURRENT_VERSION}" + [[ -z "${productVersion}" || "${productVersion}" == "" ]] && logger "${PRODUCT} CURRENT_VERSION is not set" && exit 0 + fi + else + if [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${property}" "${newfilePath}")" + status="fail" + elif [[ -f "${oldfilePath}" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + status="success" + else + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + logger "File [${newfilePath}] not found to get current version." + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." + fi + exit 0 + fi + fi + if [[ -z "${productVersion}" || "${productVersion}" == "" ]]; then + [[ "${status}" == "success" ]] && logger "No version found in file [${oldfilePath}]." + [[ "${status}" == "fail" ]] && logger "No version found in file [${newfilePath}]." + exit 0 + fi + + migrateCheckVersion "${productVersion}" "${minProductVersion}" "${maxProductVersion}" +} + +readKey () { + local property="$1" + local file="$2" + local version= + + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! -z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + version="${value}" && check=true && break + else + check=false + fi + done < "${file}" + if [[ "${check}" == "false" ]]; then + return + fi + echo "${version}" +} + +# create Log directory +createLogDir () { + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" + fi +} + +# Creating migration log file +creationMigrateLog () { + local LOG_FILE_NAME="migration.log" + createLogDir + local MIGRATION_LOG_FILE="${NEW_DATA_DIR}/log/${LOG_FILE_NAME}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + MIGRATION_LOG_FILE="${SCRIPT_HOME}/${LOG_FILE_NAME}" + fi + touch "${MIGRATION_LOG_FILE}" + setFilePermission "${LOG_FILE_PERMISSION}" "${MIGRATION_LOG_FILE}" + exec &> >(tee -a "${MIGRATION_LOG_FILE}") +} +# Set path where system.yaml should create +setSystemYamlPath () { + SYSTEM_YAML_PATH="${NEW_DATA_DIR}/etc/system.yaml" + if [[ "${INSTALLER}" != "${HELM_TYPE}" ]]; then + logger "system.yaml will be created in path [${SYSTEM_YAML_PATH}]" + fi +} +# Create directory +createDirectory () { + local directory="$1" + local output="$2" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${directory}" + mkdir -p "${directory}" && check=true || check=false + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi + setOwnershipBasedOnInstaller "${directory}" +} + +setOwnershipBasedOnInstaller () { + local directory="$1" + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + chown -R ${USER_TO_CHECK}:${GROUP_TO_CHECK} "${directory}" || warn "Setting ownership on $directory failed" + elif [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == 
"${HELM_TYPE}" ]]; then + io_setOwnership "${directory}" "${JF_USER}" "${JF_USER}" + fi +} + +getUserAndGroup () { + local file="$1" + read uid gid <<<$(stat -c '%U %G' ${file}) + USER_TO_CHECK="${uid}" + GROUP_TO_CHECK="${gid}" +} + +# set ownership +getUserAndGroupFromFile () { + case $PRODUCT in + artifactory) + getUserAndGroup "/etc/opt/jfrog/artifactory/artifactory.properties" + ;; + distribution) + getUserAndGroup "${OLD_DATA_DIR}/etc/versions.properties" + ;; + xray) + getUserAndGroup "${OLD_DATA_DIR}/security/master.key" + ;; + esac +} + +# creating required directories +createRequiredDirs () { + bannerSubSection "CREATING REQUIRED DIRECTORIES" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${JF_USER}" "${JF_USER}" "yes" + io_setOwnership "${NEW_DATA_DIR}" "${JF_USER}" "${JF_USER}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data/postgres" "${POSTGRES_USER}" "${POSTGRES_USER}" "yes" + fi + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + fi +} + +# Check entry in map is format +checkMapEntry () { + local entry="$1" + + [[ "${entry}" != *"="* ]] && echo -n "false" || echo -n "true" +} +# Check value Empty and warn +warnIfEmpty () { + local filePath="$1" + local yamlPath="$2" + local check= + + if [[ -z "${filePath}" ]]; then + warn "Empty value in yamlpath [${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + check=false + else + check=true + fi + echo "${check}" +} + +logCopyStatus () { + local status="$1" + local logMessage="$2" + local warnMessage="$3" + + [[ "${status}" == "success" ]] && logger "${logMessage}" + [[ "${status}" == "fail" ]] && warn "${warnMessage}" +} +# copy contents from source to destination +copyCmd () { + local source="$1" + local target="$2" + local mode="$3" + local status= + + case $mode in + unique) + cp -up "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + specific) + cp -pf "${source}" "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied file [${source}] to [${target}]" "Failed to copy file [${source}] to [${target}]" + ;; + patternFiles) + cp -pf "${source}"* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied files matching 
[${source}*] to [${target}]" "Failed to copy files matching [${source}*] to [${target}]" + ;; + full) + cp -prf "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + esac +} +# Check contents exist in source before copying +copyOnContentExist () { + local source="$1" + local target="$2" + local mode="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + copyCmd "${source}" "${target}" "${mode}" + else + logger "No contents to copy from [${source}]" + fi +} + +# move source to destination +moveCmd () { + local source="$1" + local target="$2" + local status= + + mv -f "${source}" "${target}" && status="success" || status="fail" + [[ "${status}" == "success" ]] && logger "Successfully moved directory [${source}] to [${target}]" + [[ "${status}" == "fail" ]] && warn "Failed to move directory [${source}] to [${target}]" +} + +# symlink target to source +symlinkCmd () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + local check=false + + if [[ "${symlinkSubDir}" == "subDir" ]]; then + ln -sf "${source}"/* "${target}" && check=true || check=false + else + ln -sf "${source}" "${target}" && check=true || check=false + fi + + [[ "${check}" == "true" ]] && logger "Successfully symlinked directory [${target}] to old [${source}]" + [[ "${check}" == "false" ]] && warn "Symlink operation failed" +} +# Check contents exist in source before symlinking +symlinkOnExist () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + if [[ "${symlinkSubDir}" == "subDir" ]]; then + symlinkCmd "${source}" "${target}" "subDir" + else + symlinkCmd "${source}" "${target}" + fi + else + logger "No contents to symlink from [${source}]" + fi +} + +prependDir () { + local absolutePath="$1" + local fullPath="$2" + local sourcePath= + + if [[ "${absolutePath}" = \/* ]]; then + sourcePath="${absolutePath}" + else + sourcePath="${fullPath}" + fi + echo "${sourcePath}" +} + +getFirstEntry (){ + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $1}' +} + +getSecondEntry () { + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $2}' +} +# To get absolutePath +pathResolver () { + local directoryPath="$1" + local dataDir= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Warning" + dataDir="${YAML_VALUE}" + cd "${dataDir}" + else + cd "${OLD_DATA_DIR}" + fi + absoluteDir="`cd "${directoryPath}";pwd`" + echo "${absoluteDir}" +} + +checkPathResolver () { + local value="$1" + + if [[ "${value}" == \/* ]]; then + value="${value}" + else + value="$(pathResolver "${value}")" + fi + echo "${value}" +} + +propertyMigrate () { + local entry="$1" + local filePath="$2" + local fileName="$3" + local check=false + + local yamlPath="$(getFirstEntry "${entry}")" + local property="$(getSecondEntry "${entry}")" + if [[ -z "${property}" ]]; then + warn "Property is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${property}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! 
-z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + value="$(migrateResolveDerbyPath "${key}" "${value}")" + value="$(migrateResolveHaDirPath "${key}" "${value}")" + value="$(updatePostgresUrlString_Hook "${yamlPath}" "${value}")" + fi + if [[ "${key}" == "context.url" ]]; then + local ip=$(echo "${value}" | awk -F/ '{print $3}' | sed 's/:.*//') + setSystemValue "shared.node.ip" "${ip}" "${SYSTEM_YAML_PATH}" + logger "Setting [shared.node.ip] with [${ip}] in system.yaml" + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" && logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" && check=true && break || check=false + fi + done < "${NEW_DATA_DIR}/${filePath}/${fileName}" + [[ "${check}" == "false" ]] && logger "Property [${property}] not found in file [${fileName}]" +} + +setHaEnabled_hook () { + echo "" +} + +migratePropertiesFiles () { + local fileList= + local filePath= + local fileName= + local map= + + retrieveYamlValue "migration.propertyFiles.files" "fileList" "Skip" + fileList="${YAML_VALUE}" + if [[ -z "${fileList}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF PROPERTY FILES" + for file in ${fileList}; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.propertyFiles.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.propertyFiles.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + if [[ "$(checkFileExists "${NEW_DATA_DIR}/${filePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + # setting haEnabled with true only if ha-node.properties is present + setHaEnabled_hook "${filePath}" + retrieveYamlValue "migration.propertyFiles.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + propertyMigrate "${entry}" "${filePath}" "${fileName}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=property" + fi + done + else + logger "File [${fileName}] was not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} + +createTargetDir () { + local mountDir="$1" + local target="$2" + + logger "Target directory not found [${mountDir}/${target}], creating it" + createDirectoryRecursive "${mountDir}" "${target}" "Warning" +} + +createDirectoryRecursive () { + local mountDir="$1" + local target="$2" + local output="$3" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${mountDir}/${target}" + local directory=$(echo "${target}" | tr '/' ' ' ) + local targetDir="${mountDir}" + for dir in ${directory}; + do + targetDir="${targetDir}/${dir}" + mkdir -p "${targetDir}" && check=true || check=false + setOwnershipBasedOnInstaller "${targetDir}" + done + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi +} + +copyOperation () { + local source="$1" + local target="$2" + local mode="$3" + local check=false + local targetDataDir= + local targetLink= + local date= + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir 
"${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + #remove source if it is a symlink + if [[ -L "${source}" ]]; then + targetLink=$(readlink -f "${source}") + logger "Removing the symlink [${source}] pointing to [${targetLink}]" + rm -f "${source}" + source=${targetLink} + fi + if [[ "$(checkDirExists "${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path" + return + fi + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + logger "No contents to copy from [${source}]" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyOnContentExist "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copySpecificFiles () { + local source="$1" + local target="$2" + local mode="$3" + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir "${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkFileExists "${source}")" != "true" ]]; then + logger "Source file [${source}] does not exist in path" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copyPatternMatchingFiles () { + local source="$1" + local target="$2" + local mode="$3" + local sourcePath="${4}" + + # prepend OLD_DATA_DIR only if source is relative path + sourcePath="$(prependDir "${sourcePath}" "${OLD_DATA_DIR}/${sourcePath}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkDirExists "${sourcePath}")" != "true" ]]; then + logger "Source [${sourcePath}] directory not found in path" + return + fi + if ls "${sourcePath}/${source}"* 1> /dev/null 2>&1; then + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${sourcePath}/${source}" "${targetDataDir}/${target}" "${mode}" + else + logger "Source file [${sourcePath}/${source}*] does not exist in path" + fi +} + +copyLogMessage () { + local mode="$1" + case $mode in + specific) + logger "Copy file [${source}] to target [${targetDataDir}/${target}]" + ;; + patternFiles) + logger "Copy files matching [${sourcePath}/${source}*] to target [${targetDataDir}/${target}]" + ;; + full) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + unique) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + esac +} + +copyBannerMessages () { + local mode="$1" + local textMode="$2" + case $mode in + specific) + bannerSection "COPY ${textMode} FILES" + ;; + patternFiles) + bannerSection "COPY MATCHING ${textMode}" + ;; + full) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + unique) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + esac +} + +invokeCopyFunctions () { + local mode="$1" + local source="$2" + local target="$3" + + case $mode in + specific) + copySpecificFiles "${source}" "${target}" "${mode}" + ;; + 
patternFiles) + retrieveYamlValue "migration.${copyFormat}.sourcePath" "map" "Warning" + local sourcePath="${YAML_VALUE}" + copyPatternMatchingFiles "${source}" "${target}" "${mode}" "${sourcePath}" + ;; + full) + copyOperation "${source}" "${target}" "${mode}" + ;; + unique) + copyOperation "${source}" "${target}" "${mode}" + ;; + esac +} +# Copies contents from source directory and target directory +copyDataDirectories () { + local copyFormat="$1" + local mode="$2" + local map= + local source= + local target= + local textMode= + local targetDataDir= + local copyFormatValue= + + retrieveYamlValue "migration.${copyFormat}" "${copyFormat}" "Skip" + copyFormatValue="${YAML_VALUE}" + if [[ -z "${copyFormatValue}" ]]; then + return + fi + textMode=$(echo "${mode}" | tr '[:lower:]' '[:upper:]' 2>/dev/null) + copyBannerMessages "${mode}" "${textMode}" + retrieveYamlValue "migration.${copyFormat}.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeCopyFunctions "${mode}" "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +invokeMoveFunctions () { + local source="$1" + local target="$2" + local sourceDataDir= + local targetBasename= + # prepend OLD_DATA_DIR only if source is relative path + sourceDataDir=$(prependDir "${source}" "${OLD_DATA_DIR}/${source}") + targetBasename=$(dirname "${target}") + logger "Moving directory source [${sourceDataDir}] to target [${NEW_DATA_DIR}/${target}]" + if [[ "$(checkDirExists "${sourceDataDir}")" != "true" ]]; then + logger "Directory [${sourceDataDir}] not found in path to move" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${targetBasename}")" != "true" ]]; then + createTargetDir "${NEW_DATA_DIR}" "${targetBasename}" + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/${target}" + else + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/tempDir" + moveCmd "${NEW_DATA_DIR}/tempDir" "${NEW_DATA_DIR}/${target}" + fi +} + +# Move source directory and target directory +moveDirectories () { + local moveDataDirectories= + local map= + local source= + local target= + + retrieveYamlValue "migration.moveDirectories" "moveDirectories" "Skip" + moveDirectories="${YAML_VALUE}" + if [[ -z "${moveDirectories}" ]]; then + return + fi + bannerSection "MOVE DIRECTORIES" + retrieveYamlValue "migration.moveDirectories.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeMoveFunctions "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e 
target=source" + fi + echo ""; + done +} + +# Trim masterKey if its generated using hex 32 +trimMasterKey () { + local masterKeyDir=/opt/jfrog/artifactory/var/etc/security + local oldMasterKey=$(<${masterKeyDir}/master.key) + local oldMasterKey_Length=$(echo ${#oldMasterKey}) + local newMasterKey= + if [[ ${oldMasterKey_Length} -gt 32 ]]; then + bannerSection "TRIM MASTERKEY" + newMasterKey=$(echo ${oldMasterKey:0:32}) + cp ${masterKeyDir}/master.key ${masterKeyDir}/backup_master.key + logger "Original masterKey is backed up : ${masterKeyDir}/backup_master.key" + rm -rf ${masterKeyDir}/master.key + echo ${newMasterKey} > ${masterKeyDir}/master.key + logger "masterKey is trimmed : ${masterKeyDir}/master.key" + fi +} + +copyDirectories () { + + copyDataDirectories "copyFiles" "full" + copyDataDirectories "copyUniqueFiles" "unique" + copyDataDirectories "copySpecificFiles" "specific" + copyDataDirectories "copyPatternMatchingFiles" "patternFiles" +} + +symlinkDir () { + local source="$1" + local target="$2" + local targetDir= + local basename= + local targetParentDir= + + targetDir="$(dirname "${target}")" + if [[ "${targetDir}" == "${source}" ]]; then + # symlink the sub directories + createDirectory "${NEW_DATA_DIR}/${target}" "Warning" + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" "subDir" + basename="$(basename "${target}")" + cd "${NEW_DATA_DIR}/${target}" && rm -f "${basename}" + fi + else + targetParentDir="$(dirname "${NEW_DATA_DIR}/${target}")" + createDirectory "${targetParentDir}" "Warning" + if [[ "$(checkDirExists "${targetParentDir}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" + fi + fi +} + +symlinkOperation () { + local source="$1" + local target="$2" + local check=false + local targetLink= + local date= + + # Check if source is a link and do symlink + if [[ -L "${OLD_DATA_DIR}/${source}" ]]; then + targetLink=$(readlink -f "${OLD_DATA_DIR}/${source}") + symlinkOnExist "${targetLink}" "${NEW_DATA_DIR}/${target}" + else + # check if source is directory and do symlink + if [[ "$(checkDirExists "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path to symlink" + return + fi + if [[ "$(checkDirContents "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "No contents found in [${OLD_DATA_DIR}/${source}] to symlink" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" != "true" ]]; then + logger "Target directory [${NEW_DATA_DIR}/${target}] does not exist to create symlink, creating it" + symlinkDir "${source}" "${target}" + else + rm -rf "${NEW_DATA_DIR}/${target}" && check=true || check=false + [[ "${check}" == "false" ]] && warn "Failed to remove contents in [${NEW_DATA_DIR}/${target}/]" + symlinkDir "${source}" "${target}" + fi + fi +} +# Creates a symlink path - Source directory to which the symbolic link should point. 
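# --- Editor's note: a minimal sketch, not part of the patch --------------------
# The copy, move and symlink sections above and below all consume flat
# "target=source" tokens from the migration info yaml: checkMapEntry only checks
# that an "=" is present, and getFirstEntry/getSecondEntry split on it with awk.
# A hypothetical entry is parsed like this:
demoParseMapEntry () {
    local entry="data/artifactory=artifactory/data"                  # hypothetical map entry
    local target="$(echo "${entry}" | awk -F"=" '{print $1}')"       # -> data/artifactory
    local source="$(echo "${entry}" | awk -F"=" '{print $2}')"       # -> artifactory/data
    echo "copy or link [${OLD_DATA_DIR:-<old>}/${source}] into [${NEW_DATA_DIR:-<new>}/${target}]"
}
# Because awk only prints field 2, a source containing a literal "=" would be
# truncated; the migration info yaml is assumed not to contain such values.
# -------------------------------------------------------------------------------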
+symlinkDirectories () { + local linkFiles= + local map= + local source= + local target= + + retrieveYamlValue "migration.linkFiles" "linkFiles" "Skip" + linkFiles="${YAML_VALUE}" + if [[ -z "${linkFiles}" ]]; then + return + fi + bannerSection "SYMLINK DIRECTORIES" + retrieveYamlValue "migration.linkFiles.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + logger "Symlink directory [${NEW_DATA_DIR}/${target}] to old [${OLD_DATA_DIR}/${source}]" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + symlinkOperation "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +updateConnectionString () { + local yamlPath="$1" + local value="$2" + local mongoPath="shared.mongo.url" + local rabbitmqPath="shared.rabbitMq.url" + local postgresPath="shared.database.url" + local redisPath="shared.redis.connectionString" + local mongoConnectionString="mongo.connectionString" + local sourceKey= + local hostIp=$(io_getPublicHostIP) + local hostKey= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + # Replace @postgres:,@mongodb:,@rabbitmq:,@redis: to @{hostIp}: (Compose Installer) + hostKey="@${hostIp}:" + case $yamlPath in + ${postgresPath}) + sourceKey="@postgres:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoPath}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${rabbitmqPath}) + sourceKey="@rabbitmq:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${redisPath}) + sourceKey="@redis:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoConnectionString}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + esac + fi + echo -n "${value}" +} + +yamlMigrate () { + local entry="$1" + local sourceFile="$2" + local value= + local yamlPath= + local key= + yamlPath="$(getFirstEntry "${entry}")" + key="$(getSecondEntry "${entry}")" + if [[ -z "${key}" ]]; then + warn "key is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + getYamlValue "${key}" "${sourceFile}" "false" + value="${YAML_VALUE}" + if [[ ! 
-z "${value}" ]]; then + value=$(updateConnectionString "${yamlPath}" "${value}") + fi + if [[ "${PRODUCT}" == "artifactory" ]]; then + replicatorProfiling + fi + if [[ -z "${value}" ]]; then + logger "No value for [${key}] in [${sourceFile}]" + else + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the key [${key}] in system.yaml" + fi +} + +migrateYamlFile () { + local files= + local filePath= + local fileName= + local sourceFile= + local map= + retrieveYamlValue "migration.yaml.files" "files" "Skip" + files="${YAML_VALUE}" + if [[ -z "${files}" ]]; then + return + fi + bannerSection "MIGRATION OF YAML FILES" + for file in $files; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.yaml.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.yaml.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + sourceFile="${NEW_DATA_DIR}/${filePath}/${fileName}" + if [[ "$(checkFileExists "${sourceFile}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + retrieveYamlValue "migration.yaml.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + yamlMigrate "${entry}" "${sourceFile}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done + else + logger "File [${fileName}] is not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} +# updates the key and value in system.yaml +updateYamlKeyValue () { + local entry="$1" + local value= + local yamlPath= + local key= + + yamlPath="$(getFirstEntry "${entry}")" + value="$(getSecondEntry "${entry}")" + if [[ -z "${value}" ]]; then + warn "value is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value [${value}] in system.yaml" +} + +updateSystemYamlFile () { + local updateYaml= + local map= + + retrieveYamlValue "migration.updateSystemYaml" "updateYaml" "Skip" + updateSystemYaml="${YAML_VALUE}" + if [[ -z "${updateSystemYaml}" ]]; then + return + fi + bannerSection "UPDATE SYSTEM YAML FILE WITH KEY AND VALUES" + retrieveYamlValue "migration.updateSystemYaml.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ -z "${map}" ]]; then + return + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + updateYamlKeyValue "${entry}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done +} + +backupFiles_hook () { + logSilly "Method ${FUNCNAME[0]}" +} + +backupDirectory () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + 
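# --- Editor's note: a minimal sketch, not part of the patch --------------------
# For compose and helm installs, updateConnectionString above swaps the internal
# compose service hosts (@postgres:, @mongodb:, @rabbitmq:, @redis:) for the
# machine's public IP before yamlMigrate writes the value into system.yaml.
# Assuming io_replaceString performs a plain substring replacement, the effect on
# a hypothetical connection string is:
demoRewriteConnectionString () {
    local hostIp="10.0.0.12"                                           # stand-in for io_getPublicHostIP
    local value="postgresql://artifactory@postgres:5432/artifactory"   # hypothetical old value
    echo "${value//@postgres:/@${hostIp}:}"                            # -> ...artifactory@10.0.0.12:5432/...
}
# -------------------------------------------------------------------------------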
effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyOnContentExist "${targetDir}" "${backupDirectory}/${dir}" "full" + fi +} + +removeOldDirectory () { + local backupDir="$1" + local entry="$2" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${entry}" "${OLD_DATA_DIR}/${entry}")" + local outputCheckDirExists="$(checkDirExists "${targetDir}")" + if [[ "${outputCheckDirExists}" != "true" ]]; then + logger "No [${targetDir}] directory found to delete" + echo ""; + return + fi + backupDirectory "${backupDir}" "${entry}" "${targetDir}" + rm -rf "${targetDir}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed directory [${targetDir}]" + [[ "${check}" == "false" ]] && warn "Failed to remove directory [${targetDir}]" + echo ""; +} + +cleanUpOldDataDirectories () { + local cleanUpOldDataDir= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldDataDir" "cleanUpOldDataDir" "Skip" + cleanUpOldDataDir="${YAML_VALUE}" + if [[ -z "${cleanUpOldDataDir}" ]]; then + return + fi + bannerSection "CLEAN UP OLD DATA DIRECTORIES" + retrieveYamlValue "migration.cleanUpOldDataDir.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old data configurations are backedup in [${backupDir}] directory ******" + backupFiles_hook "${backupDir}/${PRODUCT}" + for entry in $map; + do + removeOldDirectory "${backupDir}" "${entry}" + done +} + +backupFiles () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local fileName="$4" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyCmd "${targetDir}/${fileName}" "${backupDirectory}/${dir}" "specific" + fi +} + +removeOldFiles () { + local backupDir="$1" + local directoryName="$2" + local fileName="$3" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${directoryName}" "${OLD_DATA_DIR}/${directoryName}")" + local outputCheckFileExists="$(checkFileExists "${targetDir}/${fileName}")" + if [[ "${outputCheckFileExists}" != "true" ]]; then + logger "No [${targetDir}/${fileName}] 
file found to delete" + return + fi + backupFiles "${backupDir}" "${directoryName}" "${targetDir}" "${fileName}" + rm -f "${targetDir}/${fileName}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed file [${targetDir}/${fileName}]" + [[ "${check}" == "false" ]] && warn "Failed to remove file [${targetDir}/${fileName}]" + echo ""; +} + +cleanUpOldFiles () { + local cleanUpFiles= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldFiles" "cleanUpOldFiles" "Skip" + cleanUpOldFiles="${YAML_VALUE}" + if [[ -z "${cleanUpOldFiles}" ]]; then + return + fi + bannerSection "CLEAN UP OLD FILES" + retrieveYamlValue "migration.cleanUpOldFiles.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old files are backedup in [${backupDir}] directory ******" + for entry in $map; + do + local outputCheckMapEntry="$(checkMapEntry "${entry}")" + if [[ "${outputCheckMapEntry}" != "true" ]]; then + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e directoryName=fileName" + fi + local fileName="$(getSecondEntry "${entry}")" + local directoryName="$(getFirstEntry "${entry}")" + [[ -z "${fileName}" ]] && warn "File name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${directoryName}" ]] && warn "Directory name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + removeOldFiles "${backupDir}" "${directoryName}" "${fileName}" + echo ""; + done +} + +startMigration () { + bannerSection "STARTING MIGRATION" +} + +endMigration () { + bannerSection "MIGRATION COMPLETED SUCCESSFULLY" +} + +initialize () { + setAppDir + _pauseExecution "setAppDir" + initHelpers + _pauseExecution "initHelpers" + checkMigrationInfoYaml + _pauseExecution "checkMigrationInfoYaml" + getProduct + _pauseExecution "getProduct" + getDataDir + _pauseExecution "getDataDir" +} + +main () { + case $PRODUCT in + artifactory) + migrateArtifactory + ;; + distribution) + migrateDistribution + ;; + xray) + migrationXray + ;; + esac + exit 0 +} + +# Ensures meta data is logged +LOG_BEHAVIOR_ADD_META="$FLAG_Y" + + +migrateResolveDerbyPath () { + local key="$1" + local value="$2" + + if [[ "${key}" == "url" && "${value}" == *"db.home"* ]]; then + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + derbyPath="/opt/jfrog/artifactory/var/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + else + derbyPath="${NEW_DATA_DIR}/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + fi + fi + echo "${value}" +} + +migrateResolveHaDirPath () { + local key="$1" + local value="$2" + + if [[ "${INSTALLER}" == "${RPM_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" || "${INSTALLER}" == "${DEB_TYPE}" ]]; then + if [[ "${key}" == "artifactory.ha.data.dir" || "${key}" == "artifactory.ha.backup.dir" ]]; then + value=$(checkPathResolver "${value}") + fi + fi + echo "${value}" +} +updatePostgresUrlString_Hook () { + local yamlPath="$1" + local value="$2" + local hostIp=$(io_getPublicHostIP) + local sourceKey="//postgresql:" + if [[ "${yamlPath}" == "shared.database.url" ]]; then + value=$(io_replaceString "${value}" "${sourceKey}" "//${hostIp}:" "#") + fi + echo "${value}" +} +# Check Artifactory product version +checkArtifactoryVersion () { + local minProductVersion="6.0.0" + 
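# --- Editor's note: a minimal sketch, not part of the patch --------------------
# migrateResolveDerbyPath above expands the {db.home} placeholder that an old
# Artifactory 6 db url property may contain, before propertyMigrate writes the
# value to system.yaml. For a hypothetical value the rewrite looks like this:
demoResolveDerbyUrl () {
    local newDataDir="/opt/jfrog/artifactory/var"      # stand-in for NEW_DATA_DIR (non-compose branch)
    local value="jdbc:derby:{db.home};create=true"     # hypothetical old url value
    echo "${value}" | sed "s|{db.home}|${newDataDir}/data/artifactory/derby|"
    # -> jdbc:derby:/opt/jfrog/artifactory/var/data/artifactory/derby;create=true
}
# -------------------------------------------------------------------------------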
local maxProductVersion="7.0.0" + local propertyInDocker="ARTIFACTORY_VERSION" + local property="artifactory.version" + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + local newfilePath="${APP_DIR}/../.env" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + else + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="/etc/opt/jfrog/artifactory/artifactory.properties" + fi + + getProductVersion "${minProductVersion}" "${maxProductVersion}" "${newfilePath}" "${oldfilePath}" "${propertyInDocker}" "${property}" +} + +getCustomDataDir_hook () { + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Fail" + OLD_DATA_DIR="${YAML_VALUE}" +} + +# Get protocol value of connector +getXmlConnectorProtocol () { + local i="$1" + local filePath="$2" + local fileName="$3" + local protocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@protocol' ${filePath}/${fileName} 2>/dev/null |awk -F"=" '{print $2}' | tr -d '"') + echo -e "${protocolValue}" +} + +# Get all attributes of connector +getXmlConnectorAttributes () { + local i="$1" + local filePath="$2" + local fileName="$3" + local connectorAttributes=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@*' ${filePath}/${fileName} 2>/dev/null) + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + echo "${connectorAttributes}" +} + +# Get port value of connector +getXmlConnectorPort () { + local i="$1" + local filePath="$2" + local fileName="$3" + local portValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@port' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${portValue}" +} + +# Get maxThreads value of connector +getXmlConnectorMaxThreads () { + local i="$1" + local filePath="$2" + local fileName="$3" + local maxThreadValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@maxThreads' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${maxThreadValue}" +} +# Get sendReasonPhrase value of connector +getXmlConnectorSendReasonPhrase () { + local i="$1" + local filePath="$2" + local fileName="$3" + local sendReasonPhraseValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sendReasonPhrase' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${sendReasonPhraseValue}" +} +# Get relaxedPathChars value of connector +getXmlConnectorRelaxedPathChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedPathCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedPathChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + relaxedPathCharsValue=$(io_trim "${relaxedPathCharsValue}") + echo -e "${relaxedPathCharsValue}" +} +# Get relaxedQueryChars value of connector +getXmlConnectorRelaxedQueryChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedQueryCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedQueryChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + 
relaxedQueryCharsValue=$(io_trim "${relaxedQueryCharsValue}") + echo -e "${relaxedQueryCharsValue}" +} + +# Updating system.yaml with Connector port +setConnectorPort () { + local yamlPath="$1" + local valuePort="$2" + local portYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${valuePort}" ]]; then + warn "port value is empty, could not migrate to system.yaml" + return + fi + ## Getting port yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" portYamlPath "Warning" + portYamlPath="${YAML_VALUE}" + if [[ -z "${portYamlPath}" ]]; then + return + fi + setSystemValue "${portYamlPath}" "${valuePort}" "${SYSTEM_YAML_PATH}" + logger "Setting [${portYamlPath}] with value [${valuePort}] in system.yaml" +} + +# Updating system.yaml with Connector maxThreads +setConnectorMaxThread () { + local yamlPath="$1" + local threadValue="$2" + local maxThreadYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${threadValue}" ]]; then + return + fi + ## Getting max Threads yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" maxThreadYamlPath "Warning" + maxThreadYamlPath="${YAML_VALUE}" + if [[ -z "${maxThreadYamlPath}" ]]; then + return + fi + setSystemValue "${maxThreadYamlPath}" "${threadValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${maxThreadYamlPath}] with value [${threadValue}] in system.yaml" +} + +# Updating system.yaml with Connector sendReasonPhrase +setConnectorSendReasonPhrase () { + local yamlPath="$1" + local sendReasonPhraseValue="$2" + local sendReasonPhraseYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${sendReasonPhraseValue}" ]]; then + return + fi + ## Getting sendReasonPhrase yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" sendReasonPhraseYamlPath "Warning" + sendReasonPhraseYamlPath="${YAML_VALUE}" + if [[ -z "${sendReasonPhraseYamlPath}" ]]; then + return + fi + setSystemValue "${sendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${sendReasonPhraseYamlPath}] with value [${sendReasonPhraseValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedPathChars +setConnectorRelaxedPathChars () { + local yamlPath="$1" + local relaxedPathCharsValue="$2" + local relaxedPathCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedPathCharsValue}" ]]; then + return + fi + ## Getting relaxedPathChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedPathCharsYamlPath "Warning" + relaxedPathCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedPathCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedPathCharsYamlPath}" "${relaxedPathCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedPathCharsYamlPath}] with value [${relaxedPathCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedQueryChars +setConnectorRelaxedQueryChars () { + local yamlPath="$1" + local relaxedQueryCharsValue="$2" + local relaxedQueryCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedQueryCharsValue}" ]]; then + return + fi + ## Getting relaxedQueryChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedQueryCharsYamlPath "Warning" + relaxedQueryCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedQueryCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedQueryCharsYamlPath}" "${relaxedQueryCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedQueryCharsYamlPath}] with value 
[${relaxedQueryCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connectors configurations +setConnectorExtraConfig () { + local yamlPath="$1" + local connectorAttributes="$2" + local extraConfigPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${connectorAttributes}" ]]; then + return + fi + ## Getting extraConfig yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConfig "Warning" + extraConfigPath="${YAML_VALUE}" + if [[ -z "${extraConfigPath}" ]]; then + return + fi + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setSystemValue "${extraConfigPath}" "${connectorAttributes}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConfigPath}] with connector attributes in system.yaml" +} + +# Updating system.yaml with extra Connectors +setExtraConnector () { + local yamlPath="$1" + local extraConnector="$2" + local extraConnectorYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${extraConnector}" ]]; then + return + fi + ## Getting extraConnecotr yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConnectorYamlPath "Warning" + extraConnectorYamlPath="${YAML_VALUE}" + if [[ -z "${extraConnectorYamlPath}" ]]; then + return + fi + getYamlValue "${extraConnectorYamlPath}" "${SYSTEM_YAML_PATH}" "false" + local connectorExtra="${YAML_VALUE}" + if [[ -z "${connectorExtra}" ]]; then + setSystemValue "${extraConnectorYamlPath}" "${extraConnector}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + else + setSystemValue "${extraConnectorYamlPath}" "\"${connectorExtra} ${extraConnector}\"" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + fi +} + +# Migrate extra connectors to system.yaml +migrateExtraConnectors () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local excludeDefaultPort="$4" + local i="$5" + local extraConfig= + local extraConnector= + if [[ "${excludeDefaultPort}" == "yes" ]]; then + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" && "${portValue}" != "${DEFAULT_RT_PORT}" ]] || continue + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + done + else + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + fi +} + +# Migrate connector configurations +migrateConnectorConfig () { + local i="$1" + local protocolType="$2" + local portValue="$3" + local connectorPortYamlPath="$4" + local connectorMaxThreadYamlPath="$5" + local connectorAttributesYamlPath="$6" + local filePath="$7" + local fileName="$8" + local connectorSendReasonPhraseYamlPath="$9" + local connectorRelaxedPathCharsYamlPath="${10}" + local connectorRelaxedQueryCharsYamlPath="${11}" + + # migrate port + setConnectorPort "${connectorPortYamlPath}" "${portValue}" + + # migrate maxThreads + local maxThreadValue=$(getXmlConnectorMaxThreads "$i" "${filePath}" "${fileName}") + setConnectorMaxThread "${connectorMaxThreadYamlPath}" "${maxThreadValue}" + + # migrate sendReasonPhrase + local sendReasonPhraseValue=$(getXmlConnectorSendReasonPhrase "$i" "${filePath}" "${fileName}") + 
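# --- Editor's note: a minimal sketch, not part of the patch --------------------
# The getXmlConnector* helpers used above query server.xml through
# ${LIBXML2_PATH} (assumed here to resolve to xmllint) and then strip the
# attribute name from the raw XPath output, e.g. for a connector such as
#   <Connector port="8081" protocol="HTTP/1.1" maxThreads="200"/>
demoXmlConnectorPort () {
    local serverXml="$1"    # path to a hypothetical server.xml
    # xmllint prints: port="8081"   awk/tr reduce it to: 8081
    xmllint --xpath '//Server/Service/Connector[1]/@port' "${serverXml}" 2>/dev/null \
        | awk -F"=" '{print $2}' | tr -d '"'
}
# Note also the indirection in the *_YAMLPATH arguments: setConnectorPort and the
# other setters look the given migration.xmlFiles.serverXml.* key up in the
# migration info yaml, and the value found there is the actual system.yaml path
# handed to setSystemValue; the migration.* key itself is only a lookup key, not
# the destination.
# -------------------------------------------------------------------------------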
setConnectorSendReasonPhrase "${connectorSendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" + + # migrate relaxedPathChars + local relaxedPathCharsValue=$(getXmlConnectorRelaxedPathChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedPathChars "${connectorRelaxedPathCharsYamlPath}" "\"${relaxedPathCharsValue}\"" + # migrate relaxedQueryChars + local relaxedQueryCharsValue=$(getXmlConnectorRelaxedQueryChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedQueryChars "${connectorRelaxedQueryCharsYamlPath}" "\"${relaxedQueryCharsValue}\"" + + # migrate all attributes to extra config except port , maxThread , sendReasonPhrase ,relaxedPathChars and relaxedQueryChars + local connectorAttributes=$(getXmlConnectorAttributes "$i" "${filePath}" "${fileName}") + connectorAttributes=$(echo "${connectorAttributes}" | sed 's/port="'${portValue}'"//g' | sed 's/maxThreads="'${maxThreadValue}'"//g' | sed 's/sendReasonPhrase="'${sendReasonPhraseValue}'"//g' | sed 's/relaxedPathChars="\'${relaxedPathCharsValue}'\"//g' | sed 's/relaxedQueryChars="\'${relaxedQueryCharsValue}'\"//g') + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setConnectorExtraConfig "${connectorAttributesYamlPath}" "${connectorAttributes}" +} + +# Check for default port 8040 and 8081 in connectors and migrate +migrateConnectorPort () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + local connectorPortYamlPath="$5" + local connectorMaxThreadYamlPath="$6" + local connectorAttributesYamlPath="$7" + local connectorSendReasonPhraseYamlPath="$8" + local connectorRelaxedPathCharsYamlPath="$9" + local connectorRelaxedQueryCharsYamlPath="${10}" + local portYamlPath= + local maxThreadYamlPath= + local status= + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" == *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + RT_DEFAULTPORT_STATUS=success + else + AC_DEFAULTPORT_STATUS=success + fi + migrateConnectorConfig "${i}" "${protocolType}" "${portValue}" "${connectorPortYamlPath}" "${connectorMaxThreadYamlPath}" "${connectorAttributesYamlPath}" "${filePath}" "${fileName}" "${connectorSendReasonPhraseYamlPath}" "${connectorRelaxedPathCharsYamlPath}" "${connectorRelaxedQueryCharsYamlPath}" + done +} + +# migrate to extra, connector having default port and protocol is AJP +migrateDefaultPortIfAjp () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + done + +} + +# Comparing max threads in connectors +compareMaxThreads () { + local firstConnectorMaxThread="$1" + local firstConnectorNode="$2" + local secondConnectorMaxThread="$3" + local secondConnectorNode="$4" + local filePath="$5" + local fileName="$6" + + # choose higher maxThreads connector as Artifactory. 
+ if [[ "${firstConnectorMaxThread}" -gt ${secondConnectorMaxThread} || "${firstConnectorMaxThread}" -eq ${secondConnectorMaxThread} ]]; then + # maxThread is higher in firstConnector, + # Taking firstConnector as Artifactory and SecondConnector as Access + # maxThread is equal in both connector,considering firstConnector as Artifactory and SecondConnector as Access + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + else + # maxThread is higher in SecondConnector, + # Taking SecondConnector as Artifactory and firstConnector as Access + local rtPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +# Check max threads exist to compare +maxThreadsExistToCompare () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local firstConnectorMaxThread= + local secondConnectorMaxThread= + local firstConnectorNode= + local secondConnectorNode= + local status=success + local firstnode=fail + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ ${protocolType} == *AJP* ]]; then + # Migrate Connectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + fi + # store maxthreads value of each connector + if [[ ${firstnode} == "fail" ]]; then + firstConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + firstConnectorNode="${i}" + firstnode=success + else + secondConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + secondConnectorNode="${i}" + fi + done + [[ -z "${firstConnectorMaxThread}" ]] && status=fail + [[ -z "${secondConnectorMaxThread}" ]] && status=fail + # maxThreads is set, now compare MaxThreads + if [[ "${status}" == "success" ]]; then + compareMaxThreads "${firstConnectorMaxThread}" "${firstConnectorNode}" "${secondConnectorMaxThread}" "${secondConnectorNode}" "${filePath}" "${fileName}" + else + # Assume first connector is RT, maxThreads is not set in both connectors + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" 
"${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +migrateExtraBasedOnNonAjpCount () { + local nonAjpCount="$1" + local filePath="$2" + local fileName="$3" + local connectorCount="$4" + local i="$5" + + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ "${protocolType}" == *AJP* ]]; then + if [[ "${nonAjpCount}" -eq 1 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + else + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + continue + fi + fi +} + +# find RT and AC Connector +findRtAndAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local initialAjpCount=0 + local nonAjpCount=0 + + # get the count of non AJP + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] || continue + nonAjpCount=$((initialAjpCount+1)) + initialAjpCount="${nonAjpCount}" + done + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access and artifactory connectors + # Mark port as 8040 for access + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + done + elif [[ "${nonAjpCount}" -eq 2 ]]; then + # compare maxThreads in both connectors + maxThreadsExistToCompare "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${nonAjpCount}" -gt 2 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # setting with default port in system.yaml + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# get the count of non AJP +getCountOfNonAjp () { + local port="$1" + local connectorCount="$2" + local filePath=$3 + local fileName=$4 + local initialNonAjpCount=0 + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" 
"${filePath}" "${fileName}") + [[ "${portValue}" != "${port}" ]] || continue + [[ "${protocolType}" != *AJP* ]] || continue + local nonAjpCount=$((initialNonAjpCount+1)) + initialNonAjpCount="${nonAjpCount}" + done + echo -e "${nonAjpCount}" +} + +# Find for access connector +findAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_RT_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access connector and mark port as that of connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take RT properties into access with 8040 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add RT connector details as access connector and mark port as 8040 + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# Find for artifactory connector +findRtConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_ACCESS_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as RT connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take access properties into artifactory with 8081 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + 
if [[ "${portValue}" == "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add access connector details as RT connector and mark as ${DEFAULT_RT_PORT} + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +checkForTlsConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + local sslProtocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sslProtocol' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${sslProtocolValue}" == "TLS" ]]; then + bannerImportant "NOTE: Ignoring TLS connector during migration, modify the system yaml to enable TLS. Original server.xml is saved in path [${filePath}/${fileName}]" + TLS_CONNECTOR_EXISTS=${FLAG_Y} + continue + fi + done +} + +# set custom tomcat server Listeners to system.yaml +setListenerConnector () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + for ((i = 1 ; i <= "${listenerCount}" ; i++)) + do + local listenerConnector=$($LIBXML2_PATH --xpath '//Server/Listener['$i']' ${filePath}/${fileName} 2>/dev/null) + local listenerClassName=$($LIBXML2_PATH --xpath '//Server/Listener['$i']/@className' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${listenerClassName}" == *Apr* ]]; then + setExtraConnector "${EXTRA_LISTENER_CONFIG_YAMLPATH}" "${listenerConnector}" + fi + done +} +# add custom tomcat server Listeners +addTomcatServerListeners () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + if [[ "${listenerCount}" == "0" ]]; then + logger "No listener connectors found in the [${filePath}/${fileName}],skipping migration of listener connectors" + else + setListenerConnector "${filePath}" "${fileName}" "${listenerCount}" + setSystemValue "${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}" "true" "${SYSTEM_YAML_PATH}" + logger "Setting [${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}] with value [true] in system.yaml" + fi +} + +# server.xml migration operations +xmlMigrateOperation () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local listenerCount="$4" + RT_DEFAULTPORT_STATUS=fail + AC_DEFAULTPORT_STATUS=fail + TLS_CONNECTOR_EXISTS=${FLAG_N} + + # Check for connector with TLS , if found ignore migrating it + checkForTlsConnector "${filePath}" "${fileName}" "${connectorCount}" + if [[ "${TLS_CONNECTOR_EXISTS}" == "${FLAG_Y}" ]]; then + return + fi + addTomcatServerListeners "${filePath}" "${fileName}" "${listenerCount}" + # Migrate RT default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" 
"${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + # Migrate to extra if RT default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" + # Migrate AC default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + # Migrate to extra if access default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" + + if [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # RT and AC default port found + logger "Artifactory 8081 and Access 8040 default port are found" + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # Only AC default port found,find RT connector + logger "Found Access default 8040 port" + findRtConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # Only RT default port found,find AC connector + logger "Found Artifactory default 8081 port" + findAcConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # RT and AC default port not found, find connector + logger "Artifactory 8081 and Access 8040 default port are not found" + findRtAndAcConnector "${filePath}" "${fileName}" "${connectorCount}" + fi +} + +# get count of connectors +getXmlConnectorCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Service/Connector)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# get count of listener connectors +getTomcatServerListenersCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Listener)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# Migrate server.xml configuration to system.yaml +migrateXmlFile () { + local xmlFiles= + local fileName= + local filePath= + local sourceFilePath= + DEFAULT_ACCESS_PORT="8040" + DEFAULT_RT_PORT="8081" + AC_PORT_YAMLPATH="migration.xmlFiles.serverXml.access.port" + AC_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.access.maxThreads" + AC_SENDREASONPHRASE_YAMLPATH="migration.xmlFiles.serverXml.access.sendReasonPhrase" + AC_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.access.extraConfig" + RT_PORT_YAMLPATH="migration.xmlFiles.serverXml.artifactory.port" + RT_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.artifactory.maxThreads" + RT_SENDREASONPHRASE_YAMLPATH='migration.xmlFiles.serverXml.artifactory.sendReasonPhrase' + RT_RELAXEDPATHCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedPathChars' + RT_RELAXEDQUERYCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedQueryChars' + RT_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.artifactory.extraConfig" + ROUTER_PORT_YAMLPATH="migration.xmlFiles.serverXml.router.port" + EXTRA_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.config" + EXTRA_LISTENER_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.listener" + RT_TOMCAT_HTTPSCONNECTOR_ENABLED="artifactory.tomcat.httpsConnector.enabled" + + retrieveYamlValue "migration.xmlFiles" "xmlFiles" "Skip" + xmlFiles="${YAML_VALUE}" 
+ if [[ -z "${xmlFiles}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF XML FILES" + retrieveYamlValue "migration.xmlFiles.serverXml.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + if [[ -z "${fileName}" ]]; then + return + fi + bannerSubSection "Processing Migration of $fileName" + retrieveYamlValue "migration.xmlFiles.serverXml.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + if [[ -z "${filePath}" ]]; then + return + fi + # prepend NEW_DATA_DIR only if filePath is relative path + sourceFilePath=$(prependDir "${filePath}" "${NEW_DATA_DIR}/${filePath}") + if [[ "$(checkFileExists "${sourceFilePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] is found in path [${sourceFilePath}]" + local connectorCount=$(getXmlConnectorCount "${sourceFilePath}" "${fileName}") + if [[ "${connectorCount}" == "0" ]]; then + logger "No connectors found in the [${filePath}/${fileName}],skipping migration of xml configuration" + return + fi + local listenerCount=$(getTomcatServerListenersCount "${sourceFilePath}" "${fileName}") + xmlMigrateOperation "${sourceFilePath}" "${fileName}" "${connectorCount}" "${listenerCount}" + else + logger "File [${fileName}] is not found in path [${sourceFilePath}] to migrate" + fi +} + +compareArtifactoryUser () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + + if [[ "${oldPropertyValue}" != "${newPropertyValue}" ]]; then + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" + else + logger "No change in property [${property}] value in [${sourceFile}] to migrate" + fi +} + +migrateReplicator () { + local property="$1" + local oldPropertyValue="$2" + local yamlPath="$3" + + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" +} + +compareJavaOptions () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + local oldJavaOption= + local newJavaOption= + local extraJavaOption= + local check=false + local success=true + local status=true + + oldJavaOption=$(echo "${oldPropertyValue}" | awk 'BEGIN{FS=OFS="\""}{for(i=2;i= 3.1.0" + ## Description / note + description: This CVE needs to be fixed in the alpine base image of nginx container. + diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/NOTES.txt b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/NOTES.txt new file mode 100644 index 000000000..019ed74df --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/NOTES.txt @@ -0,0 +1,118 @@ +Congratulations. You have just deployed JFrog Artifactory HA! 
+ +{{- if .Values.artifactory.masterKey }} +{{- if and (not .Values.artifactory.masterKeySecretName) (eq .Values.artifactory.masterKey "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") }} + + +***************************************** WARNING ****************************************** +* Your Artifactory master key is still set to the provided example: * +* artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF * +* * +* You should change this to your own generated key: * +* $ export MASTER_KEY=$(openssl rand -hex 32) * +* $ echo ${MASTER_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.masterKey=${MASTER_KEY}' * +* * +* Alternatively, you can use a pre-existing secret with a key called master-key with * +* '--set artifactory.masterKeySecretName=${SECRET_NAME}' * +******************************************************************************************** +{{- end }} +{{- end }} + +{{- if .Values.artifactory.joinKey }} +{{- if eq .Values.artifactory.joinKey "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" }} + + +***************************************** WARNING ****************************************** +* Your Artifactory join key is still set to the provided example: * +* artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE * +* * +* You should change this to your own generated key: * +* $ export JOIN_KEY=$(openssl rand -hex 32) * +* $ echo ${JOIN_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.joinKey=${JOIN_KEY}' * +* * +******************************************************************************************** +{{- end }} +{{- end }} + + +{{- if .Values.postgresql.enabled }} + +DATABASE: +To extract the database password, run the following +export DB_PASSWORD=$(kubectl get --namespace {{ .Release.Namespace }} $(kubectl get secret --namespace {{ .Release.Namespace }} -o name | grep postgresql) -o jsonpath="{.data.postgresql-password}" | base64 --decode) +echo ${DB_PASSWORD} +{{- end }} + +SETUP: +1. Get the Artifactory IP and URL + + {{- if contains "NodePort" .Values.nginx.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "artifactory-ha.nginx.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + + {{- else if contains "LoadBalancer" .Values.nginx.service.type }} + NOTE: It may take a few minutes for the LoadBalancer public IP to be available! + + You can watch the status of the service by running 'kubectl get svc -w {{ template "artifactory-ha.nginx.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.nginx.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ + + {{- else if contains "ClusterIP" .Values.nginx.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 8080:80 + echo http://127.0.0.1:8080 + + {{- end }} + +2. Open Artifactory in your browser + Default credential for Artifactory: + user: admin + password: password + + {{- if .Values.artifactory.license.secret }} + +3. Artifactory license(s) is deployed as a Kubernetes secret. 
This method is relevant for initial deployment only! + Updating the license should be done via Artifactory UI or REST API. If you want to keep managing the artifactory license using the same method, you can use artifactory.copyOnEveryStartup in values.yaml. + + {{- else }} + +3. Add HA licenses to activate Artifactory HA through the Artifactory UI + NOTE: Each Artifactory node requires a valid license. See https://www.jfrog.com/confluence/display/RTF/HA+Installation+and+Setup for more details. + + {{- end }} + +{{ if or .Values.artifactory.primary.javaOpts.jmx.enabled .Values.artifactory.node.javaOpts.jmx.enabled }} +JMX configuration: +{{- if not (contains "LoadBalancer" .Values.artifactory.service.type) }} +If you want to access JMX from your computer with jconsole, you should set ".Values.artifactory.service.type=LoadBalancer" !!! +{{ end }} + +1. Get the Artifactory service IP: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +export PRIMARY_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.primary.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +export MEMBER_SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory-ha.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +{{- end }} + +2. Map the service name to the service IP in /etc/hosts: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${PRIMARY_SERVICE_IP} {{ template "artifactory-ha.primary.name" . }}\" >> /etc/hosts" +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +sudo sh -c "echo \"${MEMBER_SERVICE_IP} {{ template "artifactory-ha.fullname" . }}\" >> /etc/hosts" +{{- end }} + +3. Launch jconsole: +{{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.primary.javaOpts.jmx.port }} +{{- end }} +{{- if .Values.artifactory.node.javaOpts.jmx.enabled }} +jconsole {{ template "artifactory-ha.fullname" . }}:{{ .Values.artifactory.node.javaOpts.jmx.port }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/_helpers.tpl b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/_helpers.tpl new file mode 100644 index 000000000..04109d8c4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/_helpers.tpl @@ -0,0 +1,292 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "artifactory-ha.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The primary node name +*/}} +{{- define "artifactory-ha.primary.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-primary" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-primary" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +The member node name +*/}} +{{- define "artifactory-ha.node.name" -}} +{{- if .Values.nameOverride -}} +{{- printf "%s-member" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := .Release.Name | trunc 29 -}} +{{- printf "%s-%s-member" $name .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the nginx service.
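+(Resolves to .Values.nameOverride when set, otherwise falls back to .Values.nginx.name.)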
+*/}} +{{- define "artifactory-ha.nginx.name" -}} +{{- default .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified Replicator app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.replicator.fullname" -}} +{{- if .Values.artifactory.replicator.ingress.name -}} +{{- .Values.artifactory.replicator.ingress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified replicator tracker ingress name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.replicator.tracker.fullname" -}} +{{- if .Values.artifactory.replicator.trackerIngress.name -}} +{{- .Values.artifactory.replicator.trackerIngress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication-tracker" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory-ha.nginx.fullname" -}} +{{- if .Values.nginx.fullnameOverride -}} +{{- .Values.nginx.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nginx.name -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory-ha.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory-ha.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory-ha.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory-ha.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory-ha.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory-ha.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory-ha.name" . 
) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Scheme (http/https) based on Access TLS enabled/disabled +*/}} +{{- define "artifactory-ha.scheme" -}} +{{- if .Values.access.accessConfig.security.tls -}} +{{- printf "%s" "https" -}} +{{- else -}} +{{- printf "%s" "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKey value +*/}} +{{- define "artifactory-ha.joinKey" -}} +{{- if .Values.global.joinKey -}} +{{- .Values.global.joinKey -}} +{{- else if .Values.artifactory.joinKey -}} +{{- .Values.artifactory.joinKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKey value +*/}} +{{- define "artifactory-ha.masterKey" -}} +{{- if .Values.global.masterKey -}} +{{- .Values.global.masterKey -}} +{{- else if .Values.artifactory.masterKey -}} +{{- .Values.artifactory.masterKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKeySecretName value +*/}} +{{- define "artifactory-ha.joinKeySecretName" -}} +{{- if .Values.global.joinKeySecretName -}} +{{- .Values.global.joinKeySecretName -}} +{{- else if .Values.artifactory.joinKeySecretName -}} +{{- .Values.artifactory.joinKeySecretName -}} +{{- else -}} +{{ include "artifactory-ha.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKeySecretName value +*/}} +{{- define "artifactory-ha.masterKeySecretName" -}} +{{- if .Values.global.masterKeySecretName -}} +{{- .Values.global.masterKeySecretName -}} +{{- else if .Values.artifactory.masterKeySecretName -}} +{{- .Values.artifactory.masterKeySecretName -}} +{{- else -}} +{{ include "artifactory-ha.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve imagePullSecrets value +*/}} +{{- define "artifactory-ha.imagePullSecrets" -}} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainersBegin value +*/}} +{{- define "artifactory-ha.customInitContainersBegin" -}} +{{- if .Values.global.customInitContainersBegin -}} +{{- .Values.global.customInitContainersBegin -}} +{{- else if .Values.artifactory.customInitContainersBegin -}} +{{- .Values.artifactory.customInitContainersBegin -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainers value +*/}} +{{- define "artifactory-ha.customInitContainers" -}} +{{- if .Values.global.customInitContainers -}} +{{- .Values.global.customInitContainers -}} +{{- else if .Values.artifactory.customInitContainers -}} +{{- .Values.artifactory.customInitContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumes value +*/}} +{{- define "artifactory-ha.customVolumes" -}} +{{- if .Values.global.customVolumes -}} +{{- .Values.global.customVolumes -}} +{{- else if .Values.artifactory.customVolumes -}} +{{- .Values.artifactory.customVolumes -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumeMounts value +*/}} +{{- define "artifactory-ha.customVolumeMounts" -}} +{{- if .Values.global.customVolumeMounts -}} +{{- .Values.global.customVolumeMounts -}} +{{- else if .Values.artifactory.customVolumeMounts -}} +{{- .Values.artifactory.customVolumeMounts -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customSidecarContainers value +*/}} +{{- define "artifactory-ha.customSidecarContainers" -}} +{{- if .Values.global.customSidecarContainers -}} +{{- .Values.global.customSidecarContainers -}} +{{- else if .Values.artifactory.customSidecarContainers -}} +{{- .Values.artifactory.customSidecarContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory chart image names +*/}} +{{- define "artifactory-ha.getImageInfoByValue" -}} +{{- $dot := index . 0 }} +{{- $indexReference := index . 1 }} +{{- $registryName := index $dot.Values $indexReference "image" "registry" -}} +{{- $repositoryName := index $dot.Values $indexReference "image" "repository" -}} +{{- $tag := default $dot.Chart.AppVersion (index $dot.Values $indexReference "image" "tag") | toString -}} +{{- if $dot.Values.global }} + {{- if and $dot.Values.global.versions.artifactory (or (eq $indexReference "artifactory") (eq $indexReference "nginx") ) }} + {{- $tag = $dot.Values.global.versions.artifactory | toString -}} + {{- end -}} + {{- if $dot.Values.global.imageRegistry }} + {{- printf "%s/%s:%s" $dot.Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory app version +*/}} +{{- define "artifactory-ha.app.version" -}} +{{- $image := split ":" ((include "artifactory-ha.getImageInfoByValue" (list . "artifactory")) | toString) -}} +{{- $tag := $image._1 -}} +{{- printf "%s" $tag -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/additional-resources.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/additional-resources.yaml new file mode 100644 index 000000000..c4d06f08a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/additional-resources.yaml @@ -0,0 +1,3 @@ +{{ if .Values.additionalResources }} +{{ tpl .Values.additionalResources . 
}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/admin-bootstrap-creds.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/admin-bootstrap-creds.yaml new file mode 100644 index 000000000..b344e86b6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/admin-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} +{{- if .Values.artifactory.admin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "%s@%s=%s" .Values.artifactory.admin.username .Values.artifactory.admin.ip .Values.artifactory.admin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-access-config.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-access-config.yaml new file mode 100644 index 000000000..4eac505bc --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-access-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.access.accessConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-access-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + access.config.patch.yml: | +{{ tpl (toYaml .Values.access.accessConfig) . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-binarystore-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-binarystore-secret.yaml new file mode 100644 index 000000000..2e7cac758 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-binarystore + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-configmaps.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-configmaps.yaml new file mode 100644 index 000000000..1385bc578 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + labels: + app: {{ template "artifactory-ha.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . 
| indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-custom-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-custom-secrets.yaml new file mode 100644 index 000000000..67473fc58 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.artifactory.customSecrets }} +{{- range .Values.artifactory.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + component: "{{ $.Values.artifactory.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-database-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-database-secrets.yaml new file mode 100644 index 000000000..9ff71855f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-database-secrets.yaml @@ -0,0 +1,22 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }}-database-creds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-gcp-credentials-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-gcp-credentials-secret.yaml new file mode 100644 index 000000000..c6a2682c8 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-gcp-credentials-secret.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} +{{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-gcpcreds + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + gcp.credentials.json: |- +{{ tpl .Values.artifactory.persistence.googleStorage.gcpServiceAccount.config . | indent 4 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-installer-info.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-installer-info.yaml new file mode 100644 index 000000000..e58ec41b3 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-installer-info.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + labels: + app: {{ template "artifactory-ha.name" . 
}} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + {{ tpl .Values.installerInfo . }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-license-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-license-secret.yaml new file mode 100644 index 000000000..3f629c6e4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-license + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-migration-scripts.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-migration-scripts.yaml new file mode 100644 index 000000000..fe40f980f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-migration-scripts.yaml @@ -0,0 +1,18 @@ +{{- if .Values.artifactory.migration.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + migrate.sh: | +{{ .Files.Get "files/migrate.sh" | indent 4 }} + migrationHelmInfo.yaml: | +{{ .Files.Get "files/migrationHelmInfo.yaml" | indent 4 }} + migrationStatus.sh: | +{{ .Files.Get "files/migrationStatus.sh" | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-networkpolicy.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-networkpolicy.yaml new file mode 100644 index 000000000..371dc9a5f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory-ha.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory-ha.name" $ }} + chart: {{ template "artifactory-ha.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-nfs-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-nfs-pvc.yaml new file mode 100644 index 000000000..6ed7d82f6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-nfs-pvc.yaml @@ -0,0 +1,101 @@ +{{- if eq .Values.artifactory.persistence.type "nfs" }} +### Artifactory HA data 
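+### (A PersistentVolume/PersistentVolumeClaim pair for the shared HA data NFS mount;
+###  a matching pair for the backup mount is defined further down in this template.)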
+apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-data-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haDataMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-data-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-data-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +--- +### Artifactory HA backup +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pv + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + id: {{ template "artifactory-ha.name" . }}-backup-pv + type: nfs-volume +spec: + {{- if .Values.artifactory.persistence.nfs.mountOptions }} + mountOptions: +{{ toYaml .Values.artifactory.persistence.nfs.mountOptions | indent 4 }} + {{- end }} + capacity: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + nfs: + server: {{ .Values.artifactory.persistence.nfs.ip }} + path: "{{ .Values.artifactory.persistence.nfs.haBackupMount }}" + readOnly: false +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.fullname" . }}-backup-pvc + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + type: nfs-volume +spec: + accessModes: + - ReadWriteOnce + storageClassName: "" + resources: + requests: + storage: {{ .Values.artifactory.persistence.nfs.capacity }} + selector: + matchLabels: + id: {{ template "artifactory-ha.name" . }}-backup-pv + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-pdb.yaml new file mode 100644 index 000000000..cb45027cf --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.artifactory.node.minAvailable -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-node + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.artifactory.name }} + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.node.minAvailable }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-statefulset.yaml new file mode 100644 index 000000000..c5b6f08f4 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-node-statefulset.yaml @@ -0,0 +1,736 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.node.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.node.name" . }} + replicas: {{ .Values.artifactory.node.replicaCount }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.node.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.node.name" . }} + heritage: {{ .Release.Service }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/database-secrets: {{ include (print $.Template.BasePath "/artifactory-database-secrets.yaml") . | sha256sum }} + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . 
| indent 6 }} + {{- end }} + {{- if .Values.artifactory.setSecurityContext }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.gid }} + {{- end }} + initContainers: + {{- if or .Values.artifactory.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "artifactory-ha.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled }} + - name: "wait-for-primary" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + echo "Waiting for primary node to be ready..."; + {{- if and .Values.artifactory.node.waitForPrimaryStartup.enabled .Values.artifactory.node.waitForPrimaryStartup.time }} + echo "Sleeping to allow time for primary node to come up"; + sleep {{ .Values.artifactory.node.waitForPrimaryStartup.time }}; + {{- else }} + while [ "$(wget --spider --no-check-certificate -S -T 3 {{ include "artifactory-ha.scheme" . }}://{{ template "artifactory-ha.primary.name" . }}:{{ .Values.artifactory.externalPort }}/ 2>&1 | grep '^ HTTP/' | awk '{print $2}')" != "200" ]; + do echo "Primary not ready. 
Waiting..."; sleep 3; + done; + echo "Primary node ready!"; + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + echo "Removing join.key file"; + rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/security/join.key; + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys - load from database"; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Load custom certificates from database"; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + env: + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.masterKeySecretName" . 
}} + key: master-key + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else if .Values.artifactory.systemYaml }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- end }} + {{- if and .Values.artifactory.customPersistentPodVolumeClaim (not .Values.artifactory.customPersistentPodVolumeClaim.skipPrepareContainer) }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customInitContainers .Values.global.customInitContainers }} +{{ tpl (include "artifactory-ha.customInitContainers" .) . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ include "artifactory-ha.app.version" . }} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . 
}} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . | indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . 
"artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + {{- with .Values.artifactory.node.preStartCommand }} + echo "Running member node specific custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /entrypoint-artifactory.sh + {{- with .Values.artifactory.postStartCommand }} + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo "Running custom postStartCommand command"; + {{ tpl . $ }} + {{- end }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "false" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) 
$ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + name: http + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + name: http-internal + {{- if .Values.artifactory.node.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.node.javaOpts.jmx.port }} + name: tcp-jmx + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.node.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . 
| upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if or .Values.artifactory.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "artifactory-ha.customSidecarContainers" .) . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.node.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.node.affinity }} + {{- with .Values.artifactory.node.affinity }} + affinity: +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- else if eq .Values.artifactory.node.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.node.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} + {{- end }} + {{- end }} + {{- with .Values.artifactory.node.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . 
}}-configmaps + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + secret: + secretName: {{ default (printf "%s-%s" (include "artifactory-ha.primary.name" .) "system-yaml") .Values.systemYamlOverride.existingSecret }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if or .Values.artifactory.customVolumes .Values.global.customVolumes }} +{{ tpl (include "artifactory-ha.customVolumes" .) . | indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.node.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-pdb.yaml new file mode 100644 index 000000000..cc4dfab65 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.artifactory.primary.minAvailable -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.fullname" . }}-primary + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.artifactory.name }} + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + minAvailable: {{ .Values.artifactory.primary.minAvailable }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-statefulset.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-statefulset.yaml new file mode 100644 index 000000000..afe1d223d --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-primary-statefulset.yaml @@ -0,0 +1,862 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + version: {{ include "artifactory-ha.app.version" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/CHANGELOG.md) \nNote: This applies only when you are using bundled postgresql (postgresql.enabled=true) \nIf you are upgrading from a chart version (< 4.x) that has postgresql.image.tag of 9.x or 10.x, make sure to pass the current postgresql.image.tag and set databaseUpgradeReady=true \nOR \nIf you are upgrading from a chart version (>= 4.x), just set databaseUpgradeReady=true \n" .Values.databaseUpgradeReady | quote }} +{{- end }} +spec: + serviceName: {{ template "artifactory-ha.primary.name" . }} + replicas: {{ .Values.artifactory.primary.replicaCount }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + role: {{ template "artifactory-ha.primary.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + role: {{ template "artifactory-ha.primary.name" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/database-secrets: {{ include (print $.Template.BasePath "/artifactory-database-secrets.yaml") . 
| sha256sum }} + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.access.accessConfig }} + checksum/access-config: {{ include (print $.Template.BasePath "/artifactory-access-config.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + checksum/gcpcredentials: {{ include (print $.Template.BasePath "/artifactory-gcp-credentials-secret.yaml") . | sha256sum }} + {{- end }} + {{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} + checksum/admin-creds: {{ include (print $.Template.BasePath "/admin-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.setSecurityContext }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.gid }} + {{- end }} + initContainers: + {{- if or .Values.artifactory.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "artifactory-ha.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + - name: "create-artifactory-data-dir" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + mkdir -p {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . 
}}; + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + {{- end }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + name: volume + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + rm -rfv {{ tpl .Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir . }}/lost+found; + rm -rfv {{ .Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}/lost+found; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + volumeMounts: + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + subPath: {{ .Values.artifactory.admin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ 
.Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + {{- if .Values.access.accessConfig }} + echo "Copy access.config.patch.yml to {{ .Values.artifactory.persistence.mountPath }}/etc/access"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -fv /tmp/etc/access.config.patch.yml {{ .Values.artifactory.persistence.mountPath }}/etc/access/access.config.patch.yml; + {{- end }} + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + touch {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/reset_ca_keys; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Copying custom certificates to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + cp -fv /tmp/etc/tls.crt {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.crt; + cp -fv /tmp/etc/tls.key {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.private.key; + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security; + echo -n ${ARTIFACTORY_JOIN_KEY} > {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security/join.key; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + {{- end }} + env: + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + - name: ARTIFACTORY_JOIN_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.joinKeySecretName" . }} + key: join-key + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory-ha.masterKeySecretName" . 
}} + key: master-key + {{- end }} + volumeMounts: + - name: volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else if .Values.artifactory.systemYaml }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + mountPath: "/tmp/etc/access.config.patch.yml" + subPath: access.config.patch.yml + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + mountPath: "/tmp/etc/tls.crt" + subPath: tls.crt + - name: access-certs + mountPath: "/tmp/etc/tls.key" + subPath: tls.key + {{- end }} + {{- if and .Values.artifactory.customPersistentPodVolumeClaim (not .Values.artifactory.customPersistentPodVolumeClaim.skipPrepareContainer) }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + capabilities: + add: + - CHOWN + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customInitContainers .Values.global.customInitContainers }} +{{ tpl (include "artifactory-ha.customInitContainers" .) . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory-ha' + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ include "artifactory-ha.app.version" . 
}} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . 
| indent 8 }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + if [ -d /artifactory_extra_conf ] && [ -d /artifactory_bootstrap ]; then + echo "Copying bootstrap config from /artifactory_extra_conf to /artifactory_bootstrap"; + cp -Lrfv /artifactory_extra_conf/ /artifactory_bootstrap/; + fi; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_bootstrap/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /artifactory_bootstrap/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- with .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.artifactory.primary.preStartCommand }} + echo "Running primary specific custom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /entrypoint-artifactory.sh + {{- with .Values.artifactory.postStartCommand }} + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo "Running custom postStartCommand command"; + {{ tpl . $ }}; + {{- end }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory-ha.fullname" . 
}}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory-ha.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} + - name: JF_SHARED_NODE_PRIMARY + value: "true" + - name: JF_SHARED_NODE_HAENABLED + value: "true" +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + name: http + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + name: http-internal + {{- if .Values.artifactory.primary.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.primary.javaOpts.jmx.port }} + name: tcp-jmx + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + mountPath: "/artifactory_bootstrap/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . 
$ }}" + {{- end }} + {{- end }} + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + mountPath: "{{ tpl $.Values.artifactory.persistence.fileSystem.existingSharedClaim.dataDir $ }}/filestore{{ $sharedClaimNumber }}" + {{- end }} + - name: artifactory-ha-backup + mountPath: "{{ $.Values.artifactory.persistence.fileSystem.existingSharedClaim.backupDir }}" + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-ha-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + mountPath: "/artifactory_bootstrap/gcp.credentials.json" + subPath: gcp.credentials.json + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.cluster.license" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory-ha.customVolumeMounts" .) . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.primary.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory-ha.scheme" . 
| upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if or .Values.artifactory.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "artifactory-ha.customSidecarContainers" .) . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.primary.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.artifactory.primary.affinity }} + {{- with .Values.artifactory.primary.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + {{- else if eq .Values.artifactory.primary.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.artifactory.primary.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "artifactory-ha.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.artifactory.primary.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + {{- if .Values.artifactory.binarystore.enabled }} + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-binarystore + {{- end }} + {{- end }} + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.enabled }} + - name: gcpcreds-json + secret: + {{- if .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + secretName: {{ .Values.artifactory.persistence.googleStorage.gcpServiceAccount.customSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-gcpcreds + {{- end }} + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory-ha.fullname" . }}-license + {{- end }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory-ha.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory-ha.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory-ha.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory-ha.fullname" . }}-configmaps + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + secretName: {{ .Values.artifactory.admin.secret }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . 
}}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "file-system" }} + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) }} + - name: artifactory-ha-data-{{ $sharedClaimNumber }} + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-data-pvc-{{ $sharedClaimNumber }} + {{- end }} + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" $ }}-backup-pvc + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-ha-data + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-data-pvc + - name: artifactory-ha-backup + persistentVolumeClaim: + claimName: {{ template "artifactory-ha.fullname" . }}-backup-pvc + {{- end }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + secret: + secretName: {{ default (printf "%s-%s" (include "artifactory-ha.primary.name" .) "system-yaml") .Values.systemYamlOverride.existingSecret }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + secret: + secretName: {{ template "artifactory-ha.fullname" . }}-access-config + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + secret: + secretName: {{ .Values.access.customCertificatesSecretName }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory-ha.fullname" . }}-filebeat-config + {{- end }} + {{- if or .Values.artifactory.customVolumes .Values.global.customVolumes }} +{{ tpl (include "artifactory-ha.customVolumes" .) . | indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + volumeClaimTemplates: + {{- if .Values.artifactory.persistence.enabled }} + - metadata: + name: volume + {{- if not .Values.artifactory.primary.persistence.existingClaim }} + spec: + {{- if .Values.artifactory.persistence.storageClassName }} + {{- if (eq "-" .Values.artifactory.persistence.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.persistence.storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .Values.artifactory.persistence.accessMode }}" ] + resources: + requests: + storage: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-priority-class.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-priority-class.yaml new file mode 100644 index 000000000..417ec5c06 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory-ha.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-role.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-role.yaml new file mode 100644 index 000000000..c86bffddd --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-rolebinding.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-rolebinding.yaml new file mode 100644 index 000000000..4412870b1 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory-ha.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory-ha.fullname" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-secrets.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-secrets.yaml new file mode 100644 index 000000000..5870428ca --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-secrets.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if or .Values.artifactory.masterKey .Values.global.masterKey }} + {{- if not (or .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName) }} + master-key: {{ include "artifactory-ha.masterKey" . 
| b64enc | quote }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey }} + {{- if not (or .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName) }} + join-key: {{ include "artifactory-ha.joinKey" . | b64enc | quote }} + {{- end }} + {{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-service.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-service.yaml new file mode 100644 index 000000000..baacb970f --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-service.yaml @@ -0,0 +1,109 @@ +# Service for all Artifactory cluster nodes. +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.node.labels }} +{{ toYaml .| indent 4 }} + {{- end }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + {{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: http-router + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: http-artifactory + {{- with .Values.artifactory.node.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: tcp-jmx + {{- end }} + {{- end }} + selector: +{{- if eq (int .Values.artifactory.node.replicaCount) 0 }} + role: {{ template "artifactory-ha.primary.name" . }} +{{- else if eq .Values.artifactory.service.pool "members" }} + role: {{ template "artifactory-ha.node.name" . }} +{{- end }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} +--- +# Internal service for Artifactory primary node only! +# Used by member nodes to check readiness of primary node before starting up +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.primary.name" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.primary.labels }} +{{ toYaml . 
| indent 4 }} + {{- end }} +spec: + # Statically setting service type to ClusterIP since this is an internal only service + type: ClusterIP + {{- if and (eq .Values.artifactory.service.type "ClusterIP") .Values.artifactory.service.clusterIP }} + clusterIP: {{ .Values.artifactory.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: http-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: http-artifactory + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + {{- with .Values.artifactory.primary.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: tcp-jmx + {{- end }} + {{- end }} + selector: + role: {{ template "artifactory-ha.primary.name" . }} + app: {{ template "artifactory-ha.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-serviceaccount.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-serviceaccount.yaml new file mode 100644 index 000000000..6983c1d12 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory-ha.serviceAccountName" . }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-storage-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-storage-pvc.yaml new file mode 100644 index 000000000..e0bfa6b11 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-storage-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.artifactory.customPersistentVolumeClaim }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + labels: + app: {{ template "artifactory-ha.name" . }} + version: "{{ .Values.artifactory.version }}" + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + accessModes: + {{- range .Values.artifactory.customPersistentVolumeClaim.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentVolumeClaim.size | quote }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-system-yaml.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-system-yaml.yaml new file mode 100644 index 000000000..aaa1be152 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/artifactory-system-yaml.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.systemYamlOverride.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory-ha.primary.name" . }}-system-yaml + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/filebeat-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..d2db2a067 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.name" . }}-filebeat-config + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/ingress.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/ingress.yaml new file mode 100644 index 000000000..53f7c93ec --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/ingress.yaml @@ -0,0 +1,149 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- $ingressName := default ( include "artifactory-ha.fullname" . ) .Values.ingress.name -}} +{{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $ingressName }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- if .Values.artifactory.replicator.enabled }} +--- +{{- $replicationIngressName := default ( include "artifactory-ha.replicator.fullname" . ) .Values.artifactory.replicator.ingress.name -}} +{{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $replicationIngressName }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.ingress.annotations }} + annotations: +{{ .Values.artifactory.replicator.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.ingress.hosts }} + {{- range $host := .Values.artifactory.replicator.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: /replicator/ + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: /artifactory/api/replication/replicate/file/streaming + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.ingress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- if and .Values.artifactory.replicator.enabled .Values.artifactory.replicator.trackerIngress.enabled }} +--- +{{- $replicatorTrackerIngressName := default ( include "artifactory-ha.replicator.tracker.fullname" . 
) .Values.artifactory.replicator.trackerIngress.name -}} + {{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 + {{- else }} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ $replicatorTrackerIngressName }} + labels: + app: "{{ template "artifactory-ha.name" $ }}" + chart: "{{ template "artifactory-ha.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.trackerIngress.annotations }} + annotations: +{{ .Values.artifactory.replicator.trackerIngress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.trackerIngress.hosts }} + {{- range $host := .Values.artifactory.replicator.trackerIngress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: / + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.trackerIngress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.trackerIngress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- if .Values.customIngress }} +--- +{{ .Values.customIngress | toYaml | trimSuffix "\n" }} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/logger-configmap.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/logger-configmap.yaml new file mode 100644 index 000000000..87fe8999e --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-logger + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! 
+ fi + sleep 1 + done + +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-artifactory-conf.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-artifactory-conf.yaml new file mode 100644 index 000000000..eb1f0e698 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-certificate-secret.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-certificate-secret.yaml new file mode 100644 index 000000000..c4aceb951 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.https.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory-ha.gen-certs" . ) | indent 2 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-conf.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-conf.yaml new file mode 100644 index 000000000..5f424d52a --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-deployment.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-deployment.yaml new file mode 100644 index 000000000..7d321b3f9 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-deployment.yaml @@ -0,0 +1,201 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory-ha.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: {{ .Values.nginx.kind }} +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: +{{- if ne .Values.nginx.kind "DaemonSet" }} + replicas: {{ .Values.nginx.replicaCount }} +{{- end }} + selector: + matchLabels: + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory-ha.serviceAccountName" . }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory-ha.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName | quote }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list . "nginx") }} + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + name: http + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + name: http-internal + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + name: https + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + name: https-internal + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.nginx.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + {{- end }} + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ 
.Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory-ha.getImageInfoByValue" (list $ "logger") }} + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + resources: +{{ toYaml $.Values.nginx.loggersResources | indent 10 }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory-ha.fullname" . }}-nginx-artifactory-conf + {{- end }} + + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory-ha.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory-ha.fullname" . }}-nginx-certificate + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pdb.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pdb.yaml new file mode 100644 index 000000000..8310377a6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pdb.yaml @@ -0,0 +1,19 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + component: {{ .Values.nginx.name }} + app: {{ template "artifactory-ha.name" . 
}} + release: {{ .Release.Name }} + minAvailable: {{ .Values.nginx.minAvailable }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pvc.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pvc.yaml new file mode 100644 index 000000000..0e573f383 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled) (eq (int .Values.nginx.replicaCount) 1) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClassName }} +{{- if (eq "-" .Values.nginx.persistence.storageClassName) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClassName }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-service.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-service.yaml new file mode 100644 index 000000000..594717cf9 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/templates/nginx-service.yaml @@ -0,0 +1,79 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory-ha.nginx.fullname" . }} + labels: + app: {{ template "artifactory-ha.name" . }} + chart: {{ template "artifactory-ha.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + {{- if .Values.nginx.service.labels }} +{{ toYaml .Values.nginx.service.labels | indent 4 }} + {{- end }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} + {{- if and (eq .Values.nginx.service.type "ClusterIP") .Values.nginx.service.clusterIP }} + clusterIP: {{ .Values.nginx.service.clusterIP }} + {{- end }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later verion + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if or .Values.nginx.https.enabled .Values.nginx.service.ssloffload }} + - port: {{ .Values.nginx.https.externalPort }} + {{- if .Values.nginx.service.ssloffload }} + targetPort: {{ .Values.nginx.http.internalPort }} + {{- else }} + targetPort: {{ .Values.nginx.https.internalPort}} + {{- end }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.nginx.ssh.externalPort }} + targetPort: {{ .Values.nginx.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + selector: + app: {{ template "artifactory-ha.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/values-large.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/values-large.yaml new file mode 100644 index 000000000..ec05d2add --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/values-large.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" + node: + replicaCount: 3 + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/values-medium.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/values-medium.yaml new file mode 100644 index 000000000..33879c00b --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/values-medium.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" + node: + replicaCount: 2 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/values-small.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/values-small.yaml new file mode 100644 index 000000000..4babf97cb --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/values-small.yaml @@ -0,0 +1,24 @@ +artifactory: + primary: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" + node: + replicaCount: 1 + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/charts/artifactory-ha/artifactory-ha/4.7.600/values.yaml b/charts/artifactory-ha/artifactory-ha/4.7.600/values.yaml new file mode 100644 index 000000000..b142d38c6 --- /dev/null +++ b/charts/artifactory-ha/artifactory-ha/4.7.600/values.yaml @@ -0,0 +1,1653 @@ +# Default values for artifactory-ha. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! 
+# Access the values with {{ .Values.key.subkey }} + +global: + # imageRegistry: docker.bintray.io + # imagePullSecrets: + # - myRegistryKeySecretName + ## Chart.AppVersion can be overidden using global.versions.artifactory or .Values.artifactory.image.tag + ## Note: Order of preference is 1) global.versions 2) .Values.artifactory.image.tag 3) Chart.AppVersion + ## This applies also for nginx images (.Values.nginx.image.tag) + versions: {} + # artifactory: + # joinKey: + # masterKey: + # joinKeySecretName: + # masterKeySecretName: + # customInitContainersBegin: | + + # customInitContainers: | + + # customVolumes: | + + # customVolumeMounts: | + + # customSidecarContainers: | + +initContainerImage: docker.bintray.io/alpine:3.12.1 + +installer: + type: + platform: + +installerInfo: '{"productId": "Helm_artifactory-ha/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + +# For supporting pulling from private registries +# imagePullSecrets: +# - myRegistryKeySecretName + +## Artifactory systemYaml override +## This is for advanced usecases where users wants to provide their own systemYaml for configuring artifactory +## Refer: https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML +## Note: This will override existing (default) .Values.artifactory.systemYaml in values.yaml +## Alternatively, systemYaml can be overidden via customInitContainers using external sources like vaults, external repositories etc. Please refer customInitContainer section below for an example. +## Note: Order of preference is 1) customInitContainers 2) systemYamlOverride existingSecret 3) default systemYaml in values.yaml +systemYamlOverride: +## You can use a pre-existing secret by specifying existingSecret + existingSecret: +## The dataKey should be the name of the secret data key created. + dataKey: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. + hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. 
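+  # For illustration only (an assumption, not part of the chart defaults): such a TLS
+  # secret could be created ahead of time in the release namespace with, for example:
+  #   kubectl create secret tls chart-example-tls --cert=tls.crt --key=tls.key
+  # and then referenced in the list below.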
+ # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + +## Allows to add custom ingress +customIngress: | + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory-ha + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory-ha + + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the postgresql dependency +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 12.5.0-debian-10-r25 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlExtendedConf: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + master: + nodeSelector: {} + affinity: {} + tolerations: [] + slave: + nodeSelector: {} + affinity: {} + tolerations: [] + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## you MUST specify custom database details here or Artifactory will NOT start +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "rds-artifactory" + # key: "db-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +logger: + image: + registry: docker.bintray.io + repository: busybox + tag: 1.31.1 + +# Artifactory +artifactory: + name: artifactory-ha + # Note that by default we use appVersion to get image tag/version + image: + registry: docker.bintray.io + repository: jfrog/artifactory-pro + # tag: + pullPolicy: IfNotPresent + + # Create a priority class for the Artifactory pods or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 200 + extraConfig: 'acceptCount="100"' + + # Support for open metrics is only available for Artifactory 7.7.x (appVersions) and above. 
+ # To enable set `.Values.artifactory.openMetrics.enabled` to `true` + # Refer - https://www.jfrog.com/confluence/display/JFROG/Open+Metrics + openMetrics: + enabled: false + + # This directory is intended for use with NFS eventual configuration for HA + haDataDir: + enabled: false + path: + haBackupDir: + enabled: false + path: + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_bootstrap/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + # # Absolute path + # - source: /artifactory_bootstrap/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - access-audit.log + # - access-request.log + # - access-security-audit.log + # - access-service.log + # - artifactory-access.log + # - artifactory-event.log + # - artifactory-import-export.log + # - artifactory-request.log + # - artifactory-service.log + # - frontend-request.log + # - frontend-service.log + # - metadata-request.log + # - metadata-service.log + # - router-request.log + # - router-service.log + # - router-traefik.log + # - derby.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - tomcat-catalina.log + # - tomcat-localhost.log + + # Tomcat (catalina) loggers resources + catalinaLoggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 + ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + ## Add custom init containers + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-systemyaml-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'wget -O {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml https:///systemyaml' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ 
.Values.artifactory.image.pullPolicy }}" + # securityContext: + # allowPrivilegeEscalation: false + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + # If skipPrepareContainer is set to true , this will skip the prepare-custom-persistent-volume init container + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + # skipPrepareContainer: false + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory HA requires a unique master key. Each Artifactory node must have the same master key! + ## You can generate one with the command: "openssl rand -hex 32" + ## Pass it to helm with '--set artifactory.masterKey=${MASTER_KEY}' + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! + ## IMPORTANT: This is a mandatory for fresh Install of 7.x (App version) + # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + # masterKeySecretName: + + ## Join Key to connect to other services to Artifactory. + ## IMPORTANT: Setting this value overrides the existing joinKey + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + + # Add custom secrets - secret per file + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + + binarystore: + enabled: true + + ## admin allows to set the password for the default admin user. + ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate + admin: + ip: "127.0.0.1" + username: "admin" + password: + secret: + dataKey: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. 
+ dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. + ## Uncomment and set value as needed + extraEnvironmentVariables: + # - name: SERVER_XML_ARTIFACTORY_PORT + # value: "8081" + # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS + # value: "200" + # - name: SERVER_XML_ACCESS_MAX_THREADS + # value: "50" + # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_ACCESS_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_EXTRA_CONNECTOR + # value: "" + # - name: DB_POOL_MAX_ACTIVE + # value: "100" + # - name: DB_POOL_MAX_IDLE + # value: "10" + # - name: MY_SECRET_ENV_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret-name + # key: my-secret-key + + # TODO: Fix javaOpts for member nodes (currently uses primary settings for all nodes) + systemYaml: | + shared: + logging: + consoleLog: + enabled: {{ .Values.artifactory.consoleLog }} + extraJavaOpts: > + -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }} + {{- with .Values.artifactory.primary.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory-ha.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}" + host: "" + driver: org.postgresql.Driver + username: "{{ .Values.postgresql.postgresqlUsername }}" + {{ else }} + type: "{{ .Values.database.type }}" + driver: "{{ .Values.database.driver }}" + {{- end }} + artifactory: + {{- if .Values.artifactory.openMetrics }} + 
metrics: + enabled: {{ .Values.artifactory.openMetrics.enabled }} + {{- end }} + {{- if or .Values.artifactory.haDataDir.enabled .Values.artifactory.haBackupDir.enabled }} + node: + {{- if .Values.artifactory.haDataDir.path }} + haDataDir: {{ .Values.artifactory.haDataDir.path }} + {{- end }} + {{- if .Values.artifactory.haBackupDir.path }} + haBackupDir: {{ .Values.artifactory.haBackupDir.path }} + {{- end }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }} + frontend: + session: + timeMinutes: {{ .Values.frontend.session.timeoutMinutes | quote }} + access: + database: + maxOpenConnections: {{ .Values.access.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.access.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.access.tomcat.connector.extraConfig }} + {{- if .Values.access.database.enabled }} + type: "{{ .Values.access.database.type }}" + url: "{{ .Values.access.database.url }}" + driver: "{{ .Values.access.database.driver }}" + username: "{{ .Values.access.database.user }}" + password: "{{ .Values.access.database.password }}" + {{- end }} + metadata: + database: + maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }} + {{- if .Values.artifactory.replicator.enabled }} + replicator: + enabled: true + {{- end }} + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + gid: 1030 + terminationGracePeriodSeconds: 30 + + ## By default, the Artifactory StatefulSet is created with a securityContext that sets the `runAsUser` and the `fsGroup` to the `artifactory.uid` value. + ## If you want to disable the securityContext for the Artifactory StatefulSet, set this tag to false + setSecurityContext: true + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 90 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + enabled: true + local: false + redundancy: 3 + mountPath: "/var/opt/jfrog/artifactory" + accessMode: ReadWriteOnce + size: 200Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + + maxCacheSize: 50000000000 + cacheProviderDir: cache + eventual: + numberOfThreads: 10 + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + + ## Set the persistence storage type. 
This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## aws-s3-v3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. + binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" }} + + {{- if .Values.artifactory.persistence.fileSystem.existingSharedClaim.enabled }} + + + + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + + {{- end }} + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + // Specify the read and write strategy and redundancy for the sharding binary provider + + roundRobin + percentageFreeSpace + 2 + + + {{- range $sharedClaimNumber, $e := until (.Values.artifactory.persistence.fileSystem.existingSharedClaim.numberOfExistingClaims|int) -}} + //For each sub-provider (mount), specify the filestore location + + filestore{{ $sharedClaimNumber }} + + {{- end }} + + {{- else }} + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + 2 + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + shard-fs-1 + local + + + + + 30 + tester-remote1 + 10000 + remote + + + + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + 2 + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .maxConnections }} + {{ . }} + {{- end }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + {{- if .useInstanceCredentials }} + true + {{- else }} + false + {{- end }} + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . 
}} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . }} + {{- end }} + {{- with .enableSignedUrlRedirect }} + {{ . }} + {{- end }} + {{- with .enablePathStyleAccess }} + {{ . }} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + local + + + + 30 + 10000 + remote + + + + crossNetworkStrategy + crossNetworkStrategy + {{ .Values.artifactory.persistence.redundancy }} + + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + + crossNetworkStrategy + crossNetworkStrategy + 2 + 1 + + + + + remote + + + + local + + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type file-system + fileSystem: + ## You may also use existing shared claims for the data and backup storage. This allows storage (NAS for example) to be used for Data and Backup dirs which are safe to share across multiple artifactory nodes. + ## You may specify numberOfExistingClaims to indicate how many of these existing shared claims to mount. (Default = 1) + ## Create PVCs with ReadWriteMany that match the naming convetions: + ## {{ template "artifactory-ha.fullname" . }}-data-pvc- + ## {{ template "artifactory-ha.fullname" . }}-backup-pvc + ## Example (using numberOfExistingClaims: 2) + ## myexample-data-pvc-0 + ## myexample-data-pvc-1 + ## myexample-backup-pvc + ## Note: While you need two PVC fronting two PVs, multiple PVs can be attached to the same storage in many cases allowing you to share an underlying drive. 
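+    ## For illustration only (an assumption, not part of the chart defaults): a pre-created
+    ## ReadWriteMany data claim for a release named "myexample" could look roughly like:
+    ##   apiVersion: v1
+    ##   kind: PersistentVolumeClaim
+    ##   metadata:
+    ##     name: myexample-data-pvc-0
+    ##   spec:
+    ##     accessModes:
+    ##       - ReadWriteMany
+    ##     storageClassName: some-rwx-capable-class   # assumption: any RWX-capable class
+    ##     resources:
+    ##       requests:
+    ##         storage: 200Gi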
+ + ## Need to have the following set + existingSharedClaim: + enabled: false + numberOfExistingClaims: 1 + ## Should be a child directory of {{ .Values.artifactory.persistence.mountPath }} + dataDir: "{{ .Values.artifactory.persistence.mountPath }}/artifactory-data" + backupDir: "/var/opt/jfrog/artifactory-backup" + + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. + ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory-ha" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + mountOptions: [] + ## For artifactory.persistence.type google-storage + googleStorage: + ## When using GCP buckets as your binary store (Available with enterprise license only) + gcpServiceAccount: + enabled: false + ## Use either an existing secret prepared in advance or put the config (replace the content) in the values + ## ref: https://github.com/jfrog/charts/blob/master/stable/artifactory-ha/README.md#google-storage + # customSecretName: + # config: | + # { + # "type": "service_account", + # "project_id": "", + # "private_key_id": "?????", + # "private_key": "-----BEGIN PRIVATE KEY-----\n????????==\n-----END PRIVATE KEY-----\n", + # "client_email": "???@j.iam.gserviceaccount.com", + # "client_id": "???????", + # "auth_uri": "https://accounts.google.com/o/oauth2/auth", + # "token_uri": "https://oauth2.googleapis.com/token", + # "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + # "client_x509_cert_url": "https://www.googleapis.com/robot/v1....." + # } + endpoint: commondatastorage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-ha-gcp" + identity: + credential: + path: "artifactory-ha/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + maxConnections: 50 + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + enableSignedUrlRedirect: false + enablePathStyleAccess: false + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-ha-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory-ha/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: "AWS4-HMAC-SHA256" + + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + testConnection: false + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... 
--set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Which nodes in the cluster should be in the external load balancer pool (have external traffic routed to them) + ## Supported pool values + ## members + ## all + pool: members + + ## The following Java options are passed to the java process running Artifactory. + ## This will be passed to all cluster members. Primary and member nodes. + javaOpts: {} + # other: "" + + ## The following setting are to configure a dedicated Ingress object for Replicator service + replicator: + enabled: false + ingress: + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + ## When replicator is enabled and want to use tracker feature, trackerIngress.enabled flag should be set to true + ## Please refer - https://www.jfrog.com/confluence/display/JFROG/JFrog+Peer-to-Peer+%28P2P%29+Downloads + trackerIngress: + enabled: false + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + + ssh: + enabled: false + internalPort: 1339 + externalPort: 1339 + + annotations: {} + + ## Type specific configurations. + ## There is a difference between the primary and the member nodes. + ## Customising their resources and java parameters is done here. + primary: + name: artifactory-ha-primary + # preStartCommand specific to the primary node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-primary-0` + existingClaim: false + + ## IMPORTANT: This value should remain at 1! + replicaCount: 1 + # minAvailable: 1 + + ## Resources for the primary node + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory primary node. 
+ ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + nodeSelector: {} + + tolerations: [] + + affinity: {} + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + + node: + name: artifactory-ha-member + # preStartCommand specific to the member node, to be run after artifactory.preStartCommand + # preStartCommand: + labels: {} + persistence: + ## Set existingClaim to true or false + ## If true, you must prepare a PVC with the name e.g `volume-myrelease-artifactory-ha-member-0` + existingClaim: false + replicaCount: 2 + minAvailable: 1 + ## Resources for the member nodes + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory member nodes. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + corePoolSize: 16 + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + # other: "" + # xms: "1g" + # xmx: "2g" + # other: "" + nodeSelector: {} + + ## Wait for Artifactory primary + waitForPrimaryStartup: + enabled: true + + ## Setting time will override the built in test and will just wait the set time + time: + + tolerations: [] + + ## Complete specification of the "affinity" of the member nodes; if this is non-empty, + ## "podAntiAffinity" values are not used. + affinity: {} + + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "" + topologyKey: "kubernetes.io/hostname" + +frontend: + ## Session settings + session: + ## Time in minutes after which the frontend token will need to be refreshed + timeoutMinutes: '30' + +access: + ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. + ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates + ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes. + ## This ensures that the node to node communication is done over TLS. 
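+  ## For illustration only (an assumption): TLS for Access can be switched on either by
+  ## setting accessConfig.security.tls to true below, or at install time with, for example:
+  ##   --set access.accessConfig.security.tls=true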
+ accessConfig: + security: + tls: false + + ## You can use a pre-existing secret by specifying customCertificatesSecretName + ## Example : Create a tls secret using `kubectl create secret tls --cert=ca.crt --key=ca.private.key` + # customCertificatesSecretName: + + ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key + # resetAccessCAKeys: false + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 50 + extraConfig: 'acceptCount="100"' + +metadata: + database: + maxOpenConnections: 80 + +# Init containers +initContainers: + resources: {} +# requests: +# memory: "64Mi" +# cpu: "10m" +# limits: +# memory: "128Mi" +# cpu: "250m" + + +# Nginx +nginx: + enabled: true + kind: Deployment + name: nginx + labels: {} + replicaCount: 1 + minAvailable: 0 + uid: 104 + gid: 107 + # Note that by default we use appVersion to get image tag/version + image: + registry: docker.bintray.io + repository: jfrog/nginx-artifactory-pro + # tag: + pullPolicy: IfNotPresent + + # Priority Class name to be used in deployment if provided + priorityClassName: + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + + # Logs options + logs: + stderr: false + level: warn + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + + {{ if .Values.nginx.logs.stderr }} + error_log stderr {{ .Values.nginx.logs.level }}; + {{- else -}} + error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }}; + {{- end }} + pid /tmp/nginx.pid; + + {{- if .Values.artifactory.ssh.enabled }} + ## SSH Server Configuration + stream { + server { + listen {{ .Values.nginx.ssh.internalPort }}; + proxy_pass {{ include "artifactory-ha.fullname" . 
}}:{{ .Values.artifactory.ssh.externalPort }}; + } + } + {{- end }} + + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 128k; + proxy_buffers 40 128k; + proxy_busy_buffers_size 128k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + sendfile on; + #tcp_nopush on; + keepalive_timeout 65; + #gzip on; + include /etc/nginx/conf.d/*.conf; + } + + artifactoryConf: | + {{- if .Values.nginx.https.enabled }} + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + {{- end }} + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?<repo>.+)\.{{ include "artifactory-ha.fullname" . }} {{ include "artifactory-ha.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?<repo>.+)\.{{ . }} + {{- end -}} + {{- end -}}; + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" .
}}:{{ .Values.artifactory.externalPort }}/; + {{- if .Values.nginx.service.ssloffload}} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host; + {{- else }} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + {{- end }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass {{ include "artifactory-ha.scheme" . }}://{{ include "artifactory-ha.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + ssloffload: false + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. + externalTrafficPolicy: Cluster + labels: {} + # label-key: label-value + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + # DEPRECATED: The following will be replaced by L1065-L1076 in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ssh: + internalPort: 1339 + externalPort: 1339 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 120 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. It assumes you have a logstash installed and configured properly. +filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.9.2 + logstashUrl: "logstash:5044" + + terminationGracePeriod: 10 + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] + +## Allows to add additional kubernetes resources +## Use --- as a separator between multiple resources +## For an example, refer - https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-ha-values.yaml +additionalResources: | diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/CHANGELOG.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/CHANGELOG.md new file mode 100644 index 000000000..3a5fb161c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/CHANGELOG.md @@ -0,0 +1,120 @@ +# JFrog Container Registry Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [2.5.1] - Jul 29, 2020 +* Update dependency Artifactory chart version to 10.0.12 (Artifactory 7.6.3) + +## [2.5.0] - Jul 10, 2020 +* Update dependency Artifactory chart version to 10.0.3 (Artifactory 7.6.2) +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [2.4.0] - Jun 30, 2020 +* Update dependency Artifactory chart version to 9.6.0 (Artifactory 7.6.1) + +## [2.3.1] - Jun 12, 2020 +* Update dependency Artifactory chart version to 9.5.2 (Artifactory 7.5.7) + +## [2.3.0] - Jun 1, 2020 +* Update dependency Artifactory chart version to 9.5.0 (Artifactory 7.5.5) + +## [2.2.5] - May 27, 2020 +* Update dependency Artifactory chart version to 9.4.9 (Artifactory 7.4.3) + +## [2.2.4] - May 20, 2020 +* Update dependency Artifactory chart version to 9.4.6 (Artifactory 7.4.3) + +## [2.2.3] - May 07, 2020 +* Update dependency Artifactory chart version to 9.4.5 (Artifactory 7.4.3) +* Add `installerInfo` string format + +## [2.2.2] - Apr 28, 2020 +* Update dependency Artifactory chart version to 9.4.4 (Artifactory 7.4.3) + +## [2.2.1] - Apr 27, 2020 +* Update dependency Artifactory chart version to 9.4.3 (Artifactory 7.4.1) + +## [2.2.0] - Apr 14, 2020 +* Update dependency Artifactory chart version to 9.4.0 (Artifactory 7.4.1) + +## [2.2.0] - Apr 14, 2020 +* Update dependency Artifactory chart version to 9.4.0 (Artifactory 7.4.1) + +## [2.1.6] - Apr 13, 2020 +* Update dependency Artifactory chart version to 9.3.1 (Artifactory 7.3.2) + +## [2.1.5] - Apr 8, 2020 +* Update dependency Artifactory chart version to 9.2.8 (Artifactory 7.3.2) + +## [2.1.4] - Mar 30, 2020 +* Update dependency Artifactory chart version to 9.2.3 (Artifactory 7.3.2) + +## [2.1.3] - Mar 30, 2020 +* Update dependency Artifactory chart version to 9.2.1 (Artifactory 7.3.2) + +## [2.1.2] - Mar 26, 2020 +* Update dependency Artifactory chart version to 9.1.5 (Artifactory 7.3.2) + +## [2.1.1] - Mar 25, 2020 +* Update dependency Artifactory chart version to 9.1.4 (Artifactory 7.3.2) + +## [2.1.0] - Mar 23, 2020 +* Update dependency Artifactory chart version to 9.1.3 (Artifactory 7.3.2) + +## [2.0.13] - Mar 19, 2020 +* Update dependency Artifactory chart version to 9.0.28 (Artifactory 7.2.1) + +## [2.0.12] - Mar 17, 2020 +* Update dependency Artifactory chart version to 9.0.26 (Artifactory 7.2.1) + +## [2.0.11] - Mar 11, 2020 +* Unified charts public release + +## [2.0.10] - Mar 8, 2020 +* Update dependency Artifactory chart version to 9.0.20 (Artifactory 7.2.1) + +## [2.0.9] - Feb 26, 2020 +* Update dependency Artifactory chart version to 9.0.15 (Artifactory 7.2.1) + +## [2.0.0] - Feb 12, 2020 +* Update dependency Artifactory chart version to 9.0.0 (Artifactory 7.0.0) + +## [1.1.0] - Jan 19, 2020 +* Update dependency Artifactory chart version to 8.4.1 (Artifactory 6.17.0) + +## [1.1.1] - Feb 3, 2020 +* Update dependency Artifactory chart version to 8.4.4 + +## [1.1.0] - Jan 19, 2020 +* Update dependency Artifactory chart version to 8.4.1 (Artifactory 6.17.0) + +## [1.0.1] - Dec 31, 2019 +* Update dependency Artifactory chart version to 8.3.5 + +## [1.0.0] - Dec 23, 2019 +* Update dependency Artifactory chart version to 8.3.3 + +## [0.2.1] - Dec 12, 2019 +* Update dependency Artifactory chart version to 8.3.1 + +## [0.2.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [0.1.5] - Nov 28, 2019 +* Update dependency Artifactory chart version to 8.2.6 + +## [0.1.4] - Nov 20, 2019 +* Update Readme + +## [0.1.3] - Nov 20, 2019 +* Fix JCR logo url +* Update dependency to Artifactory 8.2.2 chart + +## [0.1.2] - 
Nov 20, 2019 +* Update JCR logo + +## [0.1.1] - Nov 20, 2019 +* Add `appVersion` to Chart.yaml + +## [0.1.0] - Nov 20, 2019 +* Initial release of the JFrog Container Registry helm chart diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/Chart.yaml new file mode 100644 index 000000000..96aed4e8b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/Chart.yaml @@ -0,0 +1,32 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-jcr +apiVersion: v1 +appVersion: 7.6.3 +description: JFrog Container Registry +home: https://jfrog.com/container-registry/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-jcr/logo/jcr-logo.png +keywords: +- artifactory +- jfrog +- container +- registry +- devops +- jfrog-container-registry +maintainers: +- email: amithk@jfrog.com + name: amithins +- email: daniele@jfrog.com + name: danielezer +- email: eldada@jfrog.com + name: eldada +- email: ramc@jfrog.com + name: chukka +- email: rimasm@jfrog.com + name: rimusz +- email: vinaya@jfrog.com + name: vinaya +name: artifactory-jcr +sources: +- https://github.com/jfrog/charts +version: 2.5.100 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/LICENSE b/charts/artifactory-jcr/artifactory-jcr/2.5.100/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/OWNERS b/charts/artifactory-jcr/artifactory-jcr/2.5.100/OWNERS new file mode 100644 index 000000000..94561416c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/OWNERS @@ -0,0 +1,14 @@ +approvers: +- eldada +- rimusz +- danielezer +- vinaya +- amithins +- chukka +reviewers: +- eldada +- rimusz +- danielezer +- vinaya +- amithins +- chukka diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/README.md new file mode 100644 index 000000000..f043782da --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/README.md @@ -0,0 +1,133 @@ +# JFrog Container Registry Helm Chart + +JFrog Container Registry is a free Artifactory edition with Docker and Helm repositories support. 
+ +## Prerequisites Details + +* Kubernetes 1.12+ + +## Chart Details +This chart will do the following: + +* Deploy JFrog Container Registry +* Deploy an optional Nginx server +* Deploy an optional PostgreSQL Database +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client. + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +### Install Chart +To install the chart with the release name `jfrog-container-registry`: +```bash +helm upgrade --install jfrog-container-registry --set postgresql.postgresqlPassword=<postgres_password> --namespace artifactory-jcr center/jfrog/artifactory-jcr +``` + +### Accessing JFrog Container Registry +**NOTE:** If using the artifactory or nginx service type `LoadBalancer`, it might take a few minutes for JFrog Container Registry's public IP to become available. + +### Updating JFrog Container Registry +Once you have a new chart version, you can upgrade your deployment with: +```bash +helm upgrade jfrog-container-registry center/jfrog/artifactory-jcr +``` + +### Deleting JFrog Container Registry + +On helm v2: +```bash +helm delete --purge jfrog-container-registry +``` + +On helm v3: +```bash +helm delete jfrog-container-registry --namespace artifactory-jcr +``` + +This will delete your JFrog Container Registry deployment.
+**NOTE:** You might have left behind persistent volumes. You should explicitly delete them with +```bash +kubectl delete pvc ... +kubectl delete pv ... +``` + +## Database +The JFrog Container Registry chart comes with PostgreSQL deployed by default.
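The bundled PostgreSQL can also be switched off in favour of an external database. The snippet below is only a sketch of what such an override might look like: `postgresql.enabled` and `artifactory.database` appear in the configuration table below, but the exact sub-keys and the file name `external-db-values.yaml` are illustrative assumptions and should be verified against the upstream Artifactory chart's values.yaml before use.

```yaml
# external-db-values.yaml -- hypothetical override file, passed with
#   helm upgrade --install ... -f external-db-values.yaml
postgresql:
  enabled: false                 # skip the bundled PostgreSQL sub chart
artifactory:
  database:
    type: postgresql             # database flavour used by Artifactory
    driver: org.postgresql.Driver
    url: jdbc:postgresql://my-external-db:5432/artifactory   # example address, replace with your own
    user: artifactory
    password: change-me          # for production, prefer referencing a pre-created secret
```

Treat this purely as a starting point; the authoritative list of database keys lives in the Artifactory chart referenced below.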
+For details on the PostgreSQL configuration or customising the database, Look at the options described in the [Artifactory helm chart](https://github.com/jfrog/charts/tree/master/stable/artifactory). + +## Configuration +The following table lists the **basic** configurable parameters of the JFrog Container Registry chart and their default values. + +**NOTE:** All supported parameters are documented in the main [artifactory helm chart](https://github.com/jfrog/charts/tree/master/stable/artifactory). + +| Parameter | Description | Default | +|------------------------------------------------|-----------------------------------|---------------------------------------------------| +| `artifactory.artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-jcr` | +| `artifactory.artifactory.image.version` | Container tag | `.Chart.AppVersion` | +| `artifactory.artifactory.resources` | Artifactory container resources | `{}` | +| `artifactory.artifactory.javaOpts` | Artifactory Java options | `{}` | +| `artifactory.nginx.enabled` | Deploy nginx server | `true` | +| `artifactory.nginx.service.type` | Nginx service type | `LoadBalancer` | +| `artifactory.nginx.tlsSecretName` | TLS secret for Nginx pod | `` | +| `artifactory.ingress.enabled` | Enable Ingress (should come with `artifactory.nginx.enabled=false`) | `false` | +| `artifactory.ingress.tls` | Ingress TLS configuration (YAML) | `[]` | +| `artifactory.postgresql.enabled` | Use the Artifactory PostgreSQL sub chart | `true` | +| `artifactory.database` | Custom database configuration (if not using bundled PostgreSQL sub-chart) | | +| `postgresql.enabled` | Enable the Artifactory PostgreSQL sub chart | `true` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add these two lines to your Helm command: +```bash +helm upgrade --install jfrog-container-registry \ + --set artifactory.nginx.enabled=false \ + --set artifactory.ingress.enabled=true \ + --set artifactory.ingress.hosts[0]="artifactory.company.com" \ + --set artifactory.artifactory.service.type=NodePort \ + --namespace artifactory-jcr center/jfrog/artifactory-jcr +``` + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml +artifactory: + artifactory: + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - jfrog-container-registry.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - jfrog-container-registry.domain.com +``` + +## Useful links +https://www.jfrog.com +https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/app-readme.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/app-readme.md new file mode 100644 index 000000000..9d9b7d85f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/app-readme.md @@ -0,0 +1,18 @@ +# JFrog Container Registry Helm Chart + +Universal Repository Manager supporting all major packaging formats, build tools and CI servers. + +## Chart Details +This chart will do the following: + +* Deploy JFrog Container Registry +* Deploy an optional Nginx server +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + + +## Useful links +Blog: [Herd Trust Into Your Rancher Labs Multi-Cloud Strategy with Artifactory](https://jfrog.com/blog/herd-trust-into-your-rancher-labs-multi-cloud-strategy-with-artifactory/) + +## Activate Your Artifactory Instance +Don't have a license? Please send an email to [rancher-jfrog-licenses@jfrog.com](mailto:rancher-jfrog-licenses@jfrog.com) to get it. + diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/.helmignore b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/CHANGELOG.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/CHANGELOG.md new file mode 100644 index 000000000..ae2651280 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/CHANGELOG.md @@ -0,0 +1,755 @@ +# JFrog Artifactory Chart Changelog +All changes to this chart will be documented in this file. + +## [10.0.12] - Jul 28, 2020 +* Document Artifactory node affinity. + +## [10.0.11] - Jul 28, 2020 +* Added maxConnections for persistent storage type aws-s3-v3. + +## [10.0.10] - Jul 28, 2020 +* Bugfix / support for userPluginSecrets with Artifactory 7 + +## [10.0.9] - Jul 27, 2020 +* Add tpl to external database secrets +* Modified `scheme` to `artifactory.scheme` + +## [10.0.8] - Jul 23, 2020 +* Added condition to disable the migration init container. 
+ +## [10.0.7] - Jul 21, 2020 +* Updated Artifactory Chart to add node and primary labels to pods and service objects. + +## [10.0.6] - Jul 20, 2020 +* Support custom CA and certificates + +## [10.0.5] - Jul 13, 2020 +* Updated Artifactory version to 7.6.3 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.3 +* Fixed Mysql database jar path in `preStartCommand` in README + +## [10.0.4] - Jul 10, 2020 +* Move some postgresql values to where they should be according to the subchart + +## [10.0.3] - Jul 8, 2020 +* Set Artifactory access client connections to the same value as the access threads + +## [10.0.2] - Jul 6, 2020 +* Updated Artifactory version to 7.6.2 +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [10.0.1] - Jul 01, 2020 +* Add dedicated ingress object for Replicator service when enabled + +## [10.0.0] - Jun 30, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update busybox tag version to `1.31.1` +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [9.6.0] - Jun 29, 2020 +* Updated Artifactory version to 7.6.1 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.1 +* Add tpl for external database secrets + +## [9.5.5] - Jun 25, 2020 +* Stop loading the Nginx stream module because it is now a core module + +## [9.5.4] - Jun 25, 2020 +* Notes.txt update - add --namespace parameter + +## [9.5.3] - Jun 11, 2020 +* Support list of custom secrets + +## [9.5.2] - Jun 12, 2020 +* Updated Artifactory version to 7.5.7 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5.7 + +## [9.5.1] - Jun 8, 2020 +* Readme update - configuring Artifactory with oracledb + +## [9.5.0] - Jun 1, 2020 +* Updated Artifactory version to 7.5.5 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5 +* Fixes bootstrap configMap permission issue +* Update postgresql tag version to `9.6.18-debian-10-r7` + +## [9.4.9] - May 27, 2020 +* Added Tomcat maxThreads & acceptCount + +## [9.4.8] - May 25, 2020 +* Fixed postgresql README `image` Parameters + +## [9.4.7] - May 24, 2020 +* Fixed typo in README regarding migration timeout + +## [9.4.6] - May 19, 2020 +* Added metadata maxOpenConnections + +## [9.4.5] - May 07, 2020 +* Fix `installerInfo` string format + +## [9.4.4] - Apr 27, 2020 +* Updated Artifactory version to 7.4.3 + +## [9.4.3] - Apr 26, 2020 +* Change order of the customInitContainers to run before the "migration-artifactory" initContainer. + +## [9.4.2] - Apr 24, 2020 +* Fix `artifactory.persistence.awsS3V3.useInstanceCredentials` incorrect conditional logic +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [9.4.1] - Apr 16, 2020 +* Custom volumes in migration init container. 
+ +## [9.4.0] - Apr 14, 2020 +* Updated Artifactory version to 7.4.1 + +## [9.3.1] - April 13, 2020 +* Update README with helm v3 commands + +## [9.3.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml +* Bump postgresql tag version to `9.6.17-debian-10-r21` in values.yaml + +## [9.2.9] - Apr 8, 2020 +* Added recommended ingress annotation to avoid 413 errors + +## [9.2.8] - Apr 8, 2020 +* Moved migration scripts under `files` directory +* Support preStartCommand in migration Init container as `artifactory.migration.preStartCommand` + +## [9.2.7] - Apr 6, 2020 +* Fix cache size (should be 5gb instead of 50gb since volume claim is only 20gb). + +## [9.2.6] - Apr 1, 2020 +* Support masterKey and joinKey as secrets + +## [9.2.5] - Apr 1, 2020 +* Fix readme use to `-hex 32` instead of `-hex 16` + +## [9.2.4] - Mar 31, 2020 +* Change the way the artifactory `command:` is set so it will properly pass a SIGTERM to java + +## [9.2.3] - Mar 29, 2020 +* Add Nginx log options: stderr as logfile and log level + +## [9.2.2] - Mar 30, 2020 +* Use the same defaulting mechanism used for the artifactory version used elsewhere in the chart + +## [9.2.1] - Mar 29, 2020 +* Fix loggers sidecars configurations to support new file system layout and new log names + +## [9.2.0] - Mar 29, 2020 +* Fix broken admin user bootstrap configuration +* **Breaking change:** renamed `artifactory.accessAdmin` to `artifactory.admin` + +## [9.1.5] - Mar 26, 2020 +* Fix volumeClaimTemplate issue + +## [9.1.4] - Mar 25, 2020 +* Fix volume name used by filebeat container + +## [9.1.3] - Mar 24, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [9.1.2] - Mar 22, 2020 +* Support for SSL offload in Nginx service(LoadBalancer) layer. Introduced `nginx.service.ssloffload` field with boolean type. + +## [9.1.1] - Mar 23, 2020 +* Moved installer info to values.yaml so it is fully customizable + +## [9.1.0] - Mar 23, 2020 +* Updated Artifactory version to 7.3.2 + +## [9.0.29] - Mar 20, 2020 +* Add support for masterKey trim during 6.x to 7.x migration if 6.x masterKey is 32 hex (64 characters) + +## [9.0.28] - Mar 18, 2020 +* Increased Nginx proxy_buffers size + +## [9.0.27] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files +* useInstanceCredentials variable was declared in S3 settings but not used in chart. Now it is being used. 
+ +## [9.0.26] - Mar 17, 2020 +* Fix rendering of Service Account annotations + +## [9.0.25] - Mar 16, 2020 +* Update Artifactory readme with extra ingress annotations needed for Artifactory to be set as SSO provider + +## [9.0.24] - Mar 16, 2020 +* Add Unsupported message from 6.18 to 7.2.x (migration) + +## [9.0.23] - Mar 12, 2020 +* Fix README.md rendering issue + +## [9.0.22] - Mar 11, 2020 +* Upgrade Docs update + +## [9.0.21] - Mar 11, 2020 +* Unified charts public release + +## [9.0.20] - Mar 6, 2020 +* Fix path to `/artifactory_bootstrap` +* Add support for controlling the name of the ingress and allow to set more than one cname + +## [9.0.19] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [9.0.18] - Feb 28, 2020 +* Add support to process `valueFrom` for extraEnvironmentVariables + +## [9.0.17] - Feb 26, 2020 +* Fix join key secret naming + +## [9.0.16] - Feb 26, 2020 +* Store join key to secret + +## [9.0.15] - Feb 26, 2020 +* Updated Artifactory version to 7.2.1 + +## [9.0.10] - Feb 07, 2020 +* Remove protection flag `databaseUpgradeReady` which was added to check internal postgres upgrade + +## [9.0.0] - Feb 07, 2020 +* Updated Artifactory version to 7.0.0 + +## [8.4.8] - Feb 13, 2020 +* Add support for SSH authentication to Artifactory + +## [8.4.7] - Feb 11, 2020 +* Change Artifactory service port name to be hard-coded to `http` instead of using `{{ .Release.Name }}` + +## [8.4.6] - Feb 9, 2020 +* Add support for `tpl` in the `postStartCommand` + +## [8.4.5] - Feb 4, 2020 +* Support customisable Nginx kind + +## [8.4.4] - Feb 2, 2020 +* Add a comment stating that it is recommended to use an external PostgreSQL with a static password for production installations + +## [8.4.3] - Jan 30, 2020 +* Add the option to configure resources for the logger containers + +## [8.4.2] - Jan 26, 2020 +* Improve `database.user` and `database.password` logic in order to support more use cases and make the configuration less repetitive + +## [8.4.1] - Jan 19, 2020 +* Fix replicator port config in nginx replicator configmap + +## [8.4.0] - Jan 19, 2020 +* Updated Artifactory version to 6.17.0 + +## [8.3.6] - Jan 16, 2020 +* Added example for external nginx-ingress + +## [8.3.5] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [8.3.4] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [8.3.3] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [8.3.2] - Dec 16, 2019 +* Fix for toggling nginx service ports + +## [8.3.1] - Dec 12, 2019 +* Add support for toggling nginx service ports + +## [8.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [8.2.6] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [8.2.5] - Nov 27, 2019 +* Add support for PriorityClass + +## [8.2.4] - Nov 21, 2019 +* Add an option to use a file system cache-fs with the file-system binarystore template + +## [8.2.3] - Nov 20, 2019 +* Update Artifactory Readme + +## [8.2.2] - Nov 20, 2019 +* Update Artfactory logo + +## [8.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [8.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [8.1.11] - Nov 17, 2019 +* Do not provide a default master key. 
Allow it to be auto generated by Artifactory on first startup + +## [8.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## [8.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [8.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [8.1.7] - Nov 9, 2019 +* Additional documentation for masterKey + +## [8.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [8.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [8.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [8.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [8.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [8.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [8.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [8.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [8.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [7.18.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [7.18.2] - Oct 21, 2019 +* Add support for setting `artifactory.labels` +* Add support for setting `nginx.labels` + +## [7.18.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [7.18.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [7.17.5] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [7.17.4] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [7.17.3] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [7.17.2] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [7.17.1] - Aug 21, 2019 +* Enable the Artifactory container's liveness and readiness probes + +## [7.17.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [7.16.11] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [7.16.10] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [7.16.9] - Aug 5, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [7.16.8] - Aug 4, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [7.16.7] - Jul 29, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [7.16.6] - Jul 24, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [7.16.5] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [7.16.4] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [7.16.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [7.16.2] - Jul 3, 2019 +* Fix values key in reverse proxy example + +## [7.16.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [7.16.0] - Jun 27, 2019 +* Update Artifactory version to 6.11 and add restart to Artifactory when bootstrap.creds file has been modified + +## [7.15.8] - Jun 27, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [7.15.6] - Jun 24, 2019 +* Update chart maintainers + +## [7.15.5] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [7.15.4] - Jun 23, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [7.15.3] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [7.15.2] - Jun 20, 2019 +* Add missing terminationGracePeriodSeconds to values.yaml + +## [7.15.1] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [7.15.0] - Jun 17, 2019 +* Use configmaps for nginx configuration and remove nginx postStart command + +## [7.14.8] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [7.14.7] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [7.14.6] - Jun 11, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [7.14.5] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [7.14.4] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [7.14.3] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [7.14.2] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [7.14.1] - May 19, 2019 +* Fix missing logger image tag + +## [7.14.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [7.13.21] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [7.13.20] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [7.13.19] - May 1, 2019 +* Fix indentation issue with the replicator system property + +## [7.13.18] - Apr 30, 2019 +* Add support for JMX monitoring + +## [7.13.17] - Apr 25, 2019 +* Added support for `cacheProviderDir` + +## [7.13.16] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [7.13.15] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [7.13.14] - Apr 15, 2019 +* Added support for `customVolumeMounts` + +## [7.13.13] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [7.13.12] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [7.13.11] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a parameter + +## [7.13.10] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [7.13.9] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [7.13.8] - Aprl 07, 2019 +* Change network policy API group + +## [7.13.7] - Aprl 04, 
2019 +* Bugfix for userPluginSecrets + +## [7.13.6] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [7.13.5] - Aprl 03, 2019 +* Added installer info + +## [7.13.4] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [7.13.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [7.13.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [7.13.1] - Mar 27, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [7.13.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [7.12.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [7.12.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [7.12.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [7.12.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [7.12.14] - Mar 21, 2019 +* Make ingress path configurable + +## [7.12.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart + +## [7.12.12] - Mar 19, 2019 +* Fix existingClaim example + +## [7.12.11] - Mar 18, 2019 +* Add information about nginx persistence + +## [7.12.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [7.12.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [7.12.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [7.12.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 +* Add support for Artifactory-CE for C++ + +## [7.12.6] - Mar 13, 2019 +* Move securityContext to container level + +## [7.12.5] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [7.12.4] - Mar 8, 2019 +* Fix existingClaim option + +## [7.12.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [7.12.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [7.12.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [7.12.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [7.11.1] - Feb 20, 2019 +* Added support for enterprise storage + +## [7.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [7.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [7.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [7.9.6] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [7.9.5] - Feb 12, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [7.9.4] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [7.9.3] - Feb 5, 2019 +* Add instructions on how to deploy Artifactory with embedded Derby database + +## [7.9.2] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [7.9.1] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [7.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [7.8.9] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [7.8.8] - Jan 17, 2019 +* Added support of values ingress.labels + +## [7.8.7] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [7.8.6] - Jan 13, 2019 +* Fix documentation 
about nginx group id + +## [7.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [7.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [7.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [7.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [7.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [7.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [7.7.13] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [7.7.12] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [7.7.11] - Dec 10, 2018 +* Fix issue when using existing claim + +## [7.7.10] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [7.7.9] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [7.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [7.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [7.7.6] - Nov 19, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [7.7.5] - Nov 14, 2018 +* Fix location of `nodeSelector`, `affinity` and `tolerations` + +## [7.7.4] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [7.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [7.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [7.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [7.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [7.6.8] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [7.6.7] - Oct 23, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [7.6.6] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [7.6.5] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [7.6.4] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow providing pre-existing secrets containing external database credentials + +## [7.6.3] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [7.6.2] - Oct 17, 2018 +* Add Apache 2.0 license + +## [7.6.1] - Oct 11, 2018 +* Supports master-key in the secrets and stateful-set +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [7.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [7.5.4] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [7.5.3] - Oct 4, 2018 +* Add PostgreSQL resources template + +## [7.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [7.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [7.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [7.4.3] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [7.4.2] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 +* Removed unused PVC + +## [7.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [7.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database 
JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/Chart.yaml new file mode 100644 index 000000000..2a3ddf048 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/Chart.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +appVersion: 7.6.3 +description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: amithk@jfrog.com + name: amithins +- email: daniele@jfrog.com + name: danielezer +- email: eldada@jfrog.com + name: eldada +- email: ramc@jfrog.com + name: chukka +- email: rimasm@jfrog.com + name: rimusz +name: artifactory +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 10.0.12 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/LICENSE b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/README.md new file mode 100644 index 000000000..25fadc7f4 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/README.md @@ -0,0 +1,1431 @@ +# JFrog Artifactory Helm Chart + +## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory Pro trial license [get one from here](https://www.jfrog.com/artifactory/free-trial/) + +## Chart Details +This chart will do the following: + +* Deploy Artifactory-Pro/Artifactory-Edge (or OSS/CE if custom image is set) +* Deploy a PostgreSQL database using the stable/postgresql chart (can be changed) **NOTE:** For production grade installations it is recommended to use an external PostgreSQL. 
+* Deploy an optional Nginx server +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +### Install Chart +To install the chart with the release name `artifactory`: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```yaml +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory with embedded Derby database +By default, this chart deploys Artifactory with PostgreSQL (running in a separate pod). +It's possible to deploy Artifactory without PostgreSQL (or any other external database), which will default to the embedded [Derby database](https://db.apache.org/derby/). +```bash +# Disable the default postgresql +helm upgrade --install artifactory --set postgresql.enabled=false --namespace artifactory center/jfrog/artifactory +``` +Artifactory will start with its embedded Derby database. + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available. +Follow the instructions output by the install command to get the Artifactory IP and access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory --namespace artifactory center/jfrog/artifactory +``` + +If Artifactory was installed without providing a value for `postgresql.postgresqlPassword` (a password was auto-generated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated password: +```bash +helm upgrade center/jfrog/artifactory --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} --namespace +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +An Artifactory 6.x to 7.x upgrade requires a one-time migration process. This is done automatically on pod startup if needed. +In extreme cases it's possible to configure the migration timeout with the following configuration. The provided default should be more than enough for completion of the migration. +```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + timeoutSeconds: 3600 +``` + +### Artifactory memory and CPU resources +The Artifactory Helm chart comes with support for configuring resource requests and limits for Artifactory, Nginx and PostgreSQL. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. +Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.javaOpts.xms` and `artifactory.javaOpts.xmx`.
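+The `--set` form of these settings is shown in the next example; equivalently, they can be kept in a values file. The following is only a minimal sketch (the file name `resources-values.yaml` and all sizes are illustrative; the JVM heap `xmx` is deliberately kept below the container memory limit):
+```bash
+# Sketch only: write an illustrative values file and apply it with -f.
+cat > resources-values.yaml <<'EOF'
+artifactory:
+  resources:
+    requests:
+      cpu: "500m"
+      memory: "1Gi"
+    limits:
+      cpu: "2"
+      memory: "4Gi"
+  javaOpts:
+    xms: "1g"
+    xmx: "4g"   # keep the heap below resources.limits.memory
+EOF
+
+helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f resources-values.yaml
+```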
+```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory \ + --set artifactory.resources.requests.cpu="500m" \ + --set artifactory.resources.limits.cpu="2" \ + --set artifactory.resources.requests.memory="1Gi" \ + --set artifactory.resources.limits.memory="4Gi" \ + --set artifactory.javaOpts.xms="1g" \ + --set artifactory.javaOpts.xmx="4g" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory center/jfrog/artifactory +``` +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resource limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installations and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resource requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + + +### Artifactory storage +When using an enterprise license, Artifactory supports a wide range of storage back ends. You can see more details on [Artifactory Filestore options](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +#### NFS +To use an NFS server as your cluster's storage, you need to: +- Set up an NFS server. Get its IP as `NFS_IP` +- Create `data` and `backup` directories on the NFS exported directory with write permissions to all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... +``` + +#### Using a network file system with the file-system persistence type +In some cases, it is not possible for the helm chart to set up your NFS mounts automatically for Artifactory. +In such cases (for example, when using AWS EFS), you will use `artifactory.persistence.type=file-system` even though your underlying persistence is actually a network file system. +The same thing applies when using a slow storage device (such as cheap disks) as your main storage solution for Artifactory. +This means that serving highly used files from the network file system/slow storage can take time, +and that's why you would want a cache filesystem that's stored locally on disk (fast disks like SSD). + +This is how you would configure it: +Create a values file with the following content: +1.
Set up your volume mount to your fast storage device +```yaml +artifactory: + ## Set up your volume mount to your fast storage device + customVolumes: | + - name: my-cache-fast-storage + persistentVolumeClaim: + claimName: my-cache-fast-storage-pvc + ## Enable caching and configure the cache directory + customVolumeMounts: | + - name: my-cache-fast-storage + mountPath: /my-fast-cache-mount + ## Configure the cache to use the fast storage mount + persistence: + cacheProviderDir: /my-fast-cache-mount + fileSystem: + cache: + enabled: true + +``` +2. Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory --namespace artifactory -f values.yaml +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore, see [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider) +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +#### AWS S3 +**NOTE** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach that IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section. + +To use an AWS S3 bucket as the cluster's filestore, see [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider) +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +... +# Using an existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match. See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, see [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine(s) that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar.
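+Assuming such a mechanism (kube2iam in this sketch) is already running in the cluster, the role annotation used in the second `--set` example below can also be kept in a values file. This is only an illustrative sketch; the file name, role ARN, region and bucket name are placeholders:
+```bash
+# Sketch only: annotate the Artifactory pod for kube2iam and select the aws-s3-v3 filestore.
+cat > s3-v3-values.yaml <<'EOF'
+artifactory:
+  annotations:
+    iam.amazonaws.com/role: arn:aws:iam::123456789012:role/artifactory-s3   # illustrative ARN
+  persistence:
+    type: aws-s3-v3
+    awsS3V3:
+      region: us-east-1                   # illustrative
+      bucketName: my-artifactory-bucket   # illustrative
+EOF
+
+helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f s3-v3-values.yaml
+```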
+ +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role. this is kube2iam specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore. See [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +* To use a persistent volume claim as cache dir together with Azure Blob Storage, additionally pass the following parameters to `helm install` and `helm upgrade` (make sure `mountPath` and `cacheProviderDir` point to the same location) +```bash +... +--set artifactory.persistence.existingClaim=${YOUR_CLAIM} \ +--set artifactory.persistence.mountPath=/opt/cache-dir \ +--set artifactory.persistence.cacheProviderDir=/opt/cache-dir \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory --namespace artifactory --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore center/jfrog/artifactory +``` + +### Create a unique Master Key +Artifactory requires a unique master key. By default the chart has one set in values.yaml (`artifactory.masterKey`). + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Pass the created master key to helm +helm upgrade --install artifactory --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory --set artifactory.masterKeySecretName=my-secret --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. 
+```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory --set artifactory.joinKeySecretName=my-secret --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Customizing Database password +You can override the specified database password (set in [values.yaml](values.yaml)), by passing it as a parameter in the install command line +```bash +helm upgrade --install artifactory --namespace artifactory --set postgresql.postgresqlPassword=12_hX34qwerQ2 center/jfrog/artifactory +``` + +You can customise other parameters in the same way, by passing them on `helm install` command line. + +### Deleting Artifactory + +On helm v2: +```bash +helm delete --purge artifactory +``` + +On helm v3: +```bash +helm delete artifactory --namespace artifactory +``` +This will completely delete your Artifactory Pro deployment. +**IMPORTANT:** This will also delete your data volumes. You will lose all data! + +### Kubernetes Secret for Artifactory License +##### Use an existing secret +You can deploy the Artifactory license as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license written in it and create a Kubernetes secret from it. +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-license --from-file=./art.lic + +# Pass the license to helm +helm upgrade --install artifactory --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=art.lic --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. +If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + +``` + +```bash +helm upgrade --install artifactory -f values.yaml --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. 
+If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. The `binarystore.xml` file. If you use the default behaviour, your `binarystore.xml` configuration will only be copied on the first startup, +which means that changes you make over time to the `binarystoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f values.yaml +``` + +2. Any custom configuration file you use to configure Artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. It is encouraged to be more specific whenever possible to increase the security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided then a default of `- {}` is applied, which allows everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory. + - name: artifactory + podSelector: + matchLabels: + app: artifactory + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgres + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory +``` +### Artifactory JMX Configuration +You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.javaOpts.jmx.enabled=true \ + --namespace artifactory center/jfrog/artifactory +``` +This will enable access to Artifactory with JMX on the default port (9010). +You have the option to change the port by setting `artifactory.javaOpts.jmx.port` to your port of choice. + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow these steps: +1.
Enable JMX as described above and change the Artifactory service to be of type LoadBalancer: +```bash +helm upgrade --install artifactory \ + --set artifactory.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + --namespace artifactory center/jfrog/artifactory + +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with `artifactory.javaOpts.jmx.host`). +So in order to connect to Artifactory with jconsole you should map the Artifactory Kubernetes service IP to the service name using your hosts file, like so: +``` + artifactory- +``` +3. Launch jconsole with the service address and port: +```bash +jconsole artifactory-: +``` + +### Bootstrapping Artifactory admin password +You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide. + +1. Create `admin-creds-values.yaml` and provide the IP (by default 127.0.0.1) and password: +```yaml +artifactory: + admin: + ip: "" # Example: "*" to allow access from anywhere + username: "admin" + password: "" +``` + +2. Apply the `admin-creds-values.yaml` file: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f admin-creds-values.yaml +``` + +3. Restart the Artifactory pod (`kubectl delete pod `) + +### Bootstrapping Artifactory configuration +**IMPORTANT:** Bootstrapping Artifactory requires a license. Pass the license as shown in the section above. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create the configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. Pass the configMap to helm: +```bash +helm upgrade --install artifactory --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory center/jfrog/artifactory +``` +OR +```bash +helm upgrade --install artifactory --set artifactory.license.licenseKey=,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory center/jfrog/artifactory +``` + +### Use custom nginx.conf with Nginx + +Steps to create a configMap with nginx.conf: +* Create the `nginx.conf` file. +```bash +kubectl create configmap nginx-config --from-file=nginx.conf +``` +* Pass the configMap to helm install +```bash +helm upgrade --install artifactory --set nginx.customConfigMap=nginx-config --namespace artifactory center/jfrog/artifactory +``` + +### Use an external Database + +**For production grade installations it is recommended to use an external PostgreSQL with a static password** + +#### PostgreSQL +There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the name of the database.
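+Assuming the external PostgreSQL instance is reachable and the `psql` client is available, the database and its owner might be prepared roughly as follows (host, user, password and database name are illustrative; see JFrog's database documentation for the exact requirements):
+```bash
+# Sketch only: create an illustrative database and owner on an existing PostgreSQL server.
+psql -h my-external-postgres.example.com -U postgres <<'SQL'
+CREATE USER artifactory WITH PASSWORD 'change-me';
+CREATE DATABASE "my-artifactory-db" WITH OWNER artifactory ENCODING 'UTF8';
+GRANT ALL PRIVILEGES ON DATABASE "my-artifactory-db" TO artifactory;
+SQL
+```
+The chart is then pointed at that database by overriding the `database.*` values.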
+ +This can be done with the following parameters: +```bash +... +--set postgresql.enabled=false \ +--set database.type=postgresql \ +--set database.driver=org.postgresql.Driver \ +--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! + +#### Other DB type +There are cases where you will want to use a different database and not the bundled **PostgreSQL**. +See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database) +> The official Artifactory Docker images include the PostgreSQL database driver. +> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib + +This can be done with the following parameters: +```bash +# Make sure your Artifactory Docker image has the MySQL database driver in it +... +--set postgresql.enabled=false \ +--set artifactory.preStartCommand="mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! +##### Configuring Artifactory with external Oracle database +To use Artifactory with an Oracle database, the required Instant Client library files and libaio have to be copied to Artifactory's tomcat/lib directory, and the `LD_LIBRARY_PATH` environment variable has to be set accordingly. +1. Create a values file with the configuration: +```yaml +postgresql: + enabled: false +database: + type: oracle + driver: oracle.jdbc.OracleDriver + url: + user: + password: +artifactory: + preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O instantclient-basic-linux.x64-19.6.0.0.0dbru.zip https://download.oracle.com/otn_software/linux/instantclient/19600/instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && unzip -jn instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && wget -O libaio1_0.3.110-3_amd64.deb http://ftp.br.debian.org/debian/pool/main/liba/libaio/libaio1_0.3.110-3_amd64.deb && dpkg-deb -x libaio1_0.3.110-3_amd64.deb . && cp lib/x86_64-linux-gnu/* ." + extraEnvironmentVariables: + - name: LD_LIBRARY_PATH + value: /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib +``` +2.
Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory --namespace artifactory -f values-oracle.yaml +``` +**NOTE:** If it's an upgrade from 6.x to 7.x, add the same `preStartCommand` under `artifactory.migration.preStartCommand` + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +--set database.secrets.url.name=my-secret \ +--set database.secrets.url.key=url \ +... +``` + +### Deleting Artifactory +To delete Artifactory: + +On helm v2: +```bash +helm delete --purge artifactory +``` + +On helm v3: +```bash +helm delete artifactory --namespace artifactory +``` +This will completely delete your Artifactory deployment. + +### Custom Docker registry for your images +If you need to pull your Docker images from a private registry, you need to create a +[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm +```bash +# Create a Docker registry secret called 'regsecret' +kubectl create secret docker-registry regsecret --docker-server= --docker-username= --docker-password= --docker-email= +``` +Once created, pass it to `helm` +```bash +helm upgrade --install artifactory --set imagePullSecrets=regsecret --namespace artifactory center/jfrog/artifactory +``` + +### Logger sidecars +This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml) + +Get the list of containers in the pod: +```bash +kubectl get pods -n -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n' +``` + +View a specific log: +```bash +kubectl logs -n -c +``` + +### Custom init containers +There are cases where a special, unsupported init process is needed, like checking something on the file system or testing something before spinning up the main container. + +For this, there is a section for writing a custom init container in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + ## Add custom init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed, for example monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml).
By default it's commented out: +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Custom secrets +If you need to add a custom secret in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom secrets in the [values.yaml](values.yaml). By default it's commented out: +```yaml +artifactory: + # Add custom secrets - secret per file + customSecrets: + - name: custom-secret + key: custom-secret.yaml + data: > + secret data +``` + +To use a custom secret, you need to define a custom volume. +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + - name: custom-secret + secret: + secretName: custom-secret +``` + +To use a volume, you need to define a volume mount as part of a custom init or sidecar container. +```yaml +artifactory: + customSidecarContainers: + - name: side-car-container + volumeMounts: + - name: custom-secret + mountPath: /opt/custom-secret.yaml + subPath: custom-secret.yaml + readOnly: true +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template: +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Add Artifactory User Plugin during installation +If you need to add an [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. + +Create a secret with the [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) using the following command: +```bash +# Secret with a single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory + +# Secret with a single user plugin with a configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory +``` + +Add the plugin secret names to `plugins.yaml` as follows: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to the helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory -f plugins.yaml --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory: # Name of the artifactory dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +For additional information, please refer [here](https://www.jfrog.com/confluence/display/JFROG/User+Plugins). + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option.
+ +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory -f configmaps.yaml --namespace artifactory center/jfrog/artifactory +``` + +This will, in turn: +* create a configMap with the files you specified above +* create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + +### Establishing TLS and Adding certificates +In HTTPS, the communication protocol is encrypted using Transport Layer Security (TLS). By default, TLS between JFrog Platform nodes is disabled. +When TLS is enabled, JFrog Access acts as the Certificate Authority (CA) signs the TLS certificates used by all the different JFrog Platform nodes. + +To establish TLS between JFrog Platform nodes: +Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. For more info, Please refer [here](https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates) + +To enable tls in charts, set `tls` to true under `access` in [values.yaml](values.yaml). By default it's false +```yaml +access: + accessConfig: + security: + tls: true +``` + +To add custom tls certificates, create a tls secret from the certificate files. + +```bash +kubectl create secret tls --cert=ca.crt --key=ca.private.key +``` + +For resetting access certificates , you can set `resetAccessCAKeys` to true under access section in [values.yaml](values.yaml) and perform an helm upgrade. +* Note : Once helm upgrade is done, set `resetAccessCAKeys` to false for subsequent upgrades (to avoid resetting access certificates on every helm upgrade) +```yaml +access: + accessConfig: + security: + tls: true + customCertificatesSecretName: + resetAccessCAKeys: true +``` + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. + +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory -f filebeat.yaml --namespace artifactory center/jfrog/artifactory +``` + +### Install Artifactory HA with Nginx and Terminate SSL in Nginx Service(LoadBalancer). +To install the helm chart with performing SSL offload in the LoadBalancer layer of Nginx +For Ex: Using AWS ACM certificates to do SSL offload in the loadbalancer layer. 
+In order to do that, simply add the following to an `artifactory-ssl-values.yaml` file: +```yaml + nginx: + ssloffload: true + https: + enabled: false + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ssl-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add the lines below to an `artifactory-ingress-values.yaml` file: +```yaml + ingress: + enabled: true + hosts: + - artifactory.company.com + artifactory: + service: + type: NodePort + nginx: + enabled: false +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ingress-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. + +```yaml +ingress: + enabled: true + defaultBackend: + enabled: false + hosts: + - myhost.example.com + annotations: + ingress.kubernetes.io/force-ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + ingress.kubernetes.io/proxy-read-timeout: "600" + ingress.kubernetes.io/proxy-send-timeout: "600" + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token; + rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3; + nginx.ingress.kubernetes.io/proxy-body-size: "0" + tls: + - hosts: + - "myhost.example.com" +``` + +If you're using Artifactory as an SSO provider (e.g. with Xray), you will need the following annotations, replacing the domain with your own: +```yaml +.. + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_pass_header Server; + proxy_set_header X-JFrog-Override-Base-Url https://; +``` + +### Ingress additional rules + +You have the option to add additional ingress rules to the Artifactory ingress.
An example for this use case can be routing the /xray path to Xray. +In order to do that, simply add the following to an `artifactory-values.yaml` file: +```yaml +ingress: + enabled: true + + defaultBackend: + enabled: false + + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite "(?i)/xray(/|$)(.*)" /$2 break; + + additionalRules: | + - host: + http: + paths: + - path: / + backend: + serviceName: + servicePort: + - path: /xray + backend: + serviceName: + servicePort: + - path: /artifactory + backend: + serviceName: {{ template "artifactory.nginx.fullname" . }} + servicePort: {{ .Values.nginx.externalPortHttp }} +``` + +and running: +```bash +helm upgrade --install artifactory center/jfrog/artifactory -f artifactory-values.yaml +``` + +### Dedicated Ingress object for replicator service + +You have the option to add an additional ingress object for the Replicator service. An example for this use case can be routing the /replicator/ path to Artifactory. +In order to do that, simply add the following to an `artifactory-values.yaml` file: + +```yaml +artifactory: + replicator: + enabled: true + ingress: + name: + hosts: + - myhost.example.com + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-buffering: "off" + nginx.ingress.kubernetes.io/configuration-snippet: | + chunked_transfer_encoding on; + tls: + - hosts: + - "myhost.example.com" + secretName: +``` + + +### Ingress behind another load balancer +If you are running a load balancer that is used to offload TLS in front of the Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable the **`use-forwarded-headers=true`** option. Otherwise nginx will fill those headers with the request information it receives from the external load balancer. + +To enable it with `helm install`: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress --set-string controller.config.use-forwarded-headers=true +``` +or `helm upgrade`: +```bash +helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true stable/nginx-ingress +``` +or create a values.yaml file with the following content: +```yaml +controller: + config: + use-forwarded-headers: "true" +``` +Then install nginx-ingress with the values file you created: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress stable/nginx-ingress -f values.yaml +``` + + +## Configuration +The following table lists the configurable parameters of the artifactory chart and their default values.
+ +| Parameter | Description | Default | +|---------------------------|-----------------------------------|----------------------------------------------------------| +| `imagePullSecrets` | Docker registry pull secret | | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Artifactory service account annotations | `` | +| `rbac.create` | Specifies whether RBAC resources should be created | `true` | +| `rbac.role.rules` | Rules to create | `[]` | +| `logger.image.repository` | repository for logger image | `busybox` | +| `logger.image.tag` | tag for logger image | `1.30` | +| `artifactory.name` | Artifactory name | `artifactory` | +| `artifactory.replicaCount` | Replica count for Artifactory deployment| `1` | +| `artifactory.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `artifactory.image.repository` | Container image | `docker.bintray.io/jfrog/artifactory-pro` | +| `artifactory.image.version` | Container tag | `.Chart.AppVersion` | +| `artifactory.labels` | Artifactory labels | `{}` | +| `artifactory.priorityClass.create` | Create a PriorityClass object | `false` | +| `artifactory.priorityClass.value` | Priority Class value | `1000000000` | +| `artifactory.priorityClass.name` | Priority Class name | `{{ template "artifactory.fullname" . }}` | +| `artifactory.priorityClass.existingPriorityClass` | Use existing priority class | `` | +| `artifactory.loggers` | Artifactory loggers (see values.yaml for possible values) | `[]` | +| `artifactory.loggersResources.requests.memory` | Artifactory loggers initial memory request | | +| `artifactory.loggersResources.requests.cpu` | Artifactory loggers initial cpu request | | +| `artifactory.loggersResources.limits.memory` | Artifactory loggers memory limit | | +| `artifactory.loggersResources.limits.cpu` | Artifactory loggers cpu limit | | +| `artifactory.catalinaLoggers` | Artifactory Tomcat loggers (see values.yaml for possible values) | `[]` | +| `artifactory.catalinaLoggersResources.requests.memory` | Artifactory Tomcat loggers initial memory request | | +| `artifactory.catalinaLoggersResources.requests.cpu` | Artifactory Tomcat loggers initial cpu request | | +| `artifactory.catalinaLoggersResources.limits.memory` | Artifactory Tomcat loggers memory limit | | +| `artifactory.catalinaLoggersResources.limits.cpu` | Artifactory Tomcat loggers cpu limit | | +| `artifactory.customInitContainers`| Custom init containers | | +| `artifactory.customSidecarContainers`| Custom sidecar containers | | +| `artifactory.customVolumes` | Custom volumes | | +| `artifactory.customVolumeMounts` | Custom Artifactory volumeMounts | | +| `artifactory.customSecrets` | Custom secrets | | +| `artifactory.customPersistentPodVolumeClaim` | Custom PVC spec to create and attach a unique PVC for each pod on startup with the volumeClaimTemplates feature in StatefulSet | | +| `artifactory.customPersistentVolumeClaim` | Custom PVC spec to be mounted to the all artifactory containers using a volume | | +| `artifactory.userPluginSecrets` | Array of secret names for Artifactory user plugins | | +| `artifactory.license.licenseKey` | Artifactory license key. Providing the license key as a parameter will cause a secret containing the license key to be created as part of the release. Use either this setting or the license.secret and license.dataKey. 
If you use both, the latter will be used. | | +| `artifactory.configMaps` | configMaps to be created as volume by the name `artifactory-configmaps`. In order to use these configMaps, you will need to add `customVolumeMounts` to point to the created volume and mount it onto a container | | +| `artifactory.license.secret` | Artifactory license secret name | | +| `artifactory.license.dataKey`| Artifactory license secret data key | | +| `artifactory.service.name`| Artifactory service name to be set in Nginx configuration | `artifactory` | +| `artifactory.service.type`| Artifactory service type | `ClusterIP` | +| `artifactory.service.loadBalancerSourceRanges`| Artifactory service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `artifactory.service.annotations` | Artifactory service annotations | `{}` | +| `artifactory.externalPort` | Artifactory router service external port | `8082` | +| `artifactory.internalPort` | Artifactory router service internal port (**DO NOT** use port lower than 1024) | `8082` | +| `artifactory.internalArtifactoryPort` | Artifactory service internal port (**DO NOT** use port lower than 1024) | `8081` | +| `artifactory.externalArtifactoryPort` | Artifactory service external port | `8081` | +| `artifactory.livenessProbe.enabled` | Enable liveness probe | `true` | +| `artifactory.livenessProbe.path` | Liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `artifactory.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.masterKey` | Artifactory Master Key. A 128-Bit key size (hexadecimal encoded) string (32 hex characters). Can be generated with `openssl rand -hex 32`. NOTE: This key is generated only once and cannot be updated once created | `` | +| `artifactory.masterKeySecretName` | Artifactory Master Key secret name | | +| `artifactory.joinKey` | Join Key to connect other services to Artifactory. Can be generated with `openssl rand -hex 32` | `` | +| `artifactory.joinKeySecretName` | Artifactory join Key secret name | | +| `artifactory.admin.ip` | Artifactory admin ip to be set upon startup, can use (*) for 0.0.0.0| `127.0.0.1` | +| `artifactory.admin.username` | Artifactory admin username to be set upon startup| `admin` | +| `artifactory.admin.password` | Artifactory admin password to be set upon startup| | +| `artifactory.admin.secret` | Artifactory admin secret name | | +| `artifactory.admin.dataKey` | Artifactory admin secret data key | | +| `artifactory.preStartCommand` | Command to run before entrypoint starts | | +| `artifactory.postStartCommand` | Command to run after container starts. Supports templating with `tpl` | | +| `artifactory.extraEnvironmentVariables` | Extra environment variables to pass to Artifactory. Supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function. 
See [documentation](https://www.jfrog.com/confluence/display/RTF/Installing+with+Docker#InstallingwithDocker-SupportedEnvironmentVariables) | | +| `artifactory.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `artifactory.readinessProbe.path` | Readiness probe HTTP Get path | `/router/api/v1/system/health` | +| `artifactory.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `artifactory.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `artifactory.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `artifactory.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 | +| `artifactory.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `artifactory.deleteDBPropertiesOnStartup` | Whether to delete the ARTIFACTORY_HOME/etc/db.properties file on startup. Disabling this will remove the ability for the db.properties to be updated with any DB-related environment variables change (e.g. DB_HOST, DB_URL) | `true` | +| `artifactory.database.maxOpenConnections` | Maximum amount of open connections from Artifactory to the DB | `80` | +| `artifactory.copyOnEveryStartup` | List of files to copy on startup from source (which is absolute) to target (which is relative to ARTIFACTORY_HOME | | +| `artifactory.migration.timeoutSeconds` | Artifactory migration Maximum Timeout in seconds| `3600` | +| `artifactory.migration.enabled` | Artifactory migration enabled or disabled | `true` | +| `artifactory.persistence.mountPath` | Artifactory persistence volume mount path | `"/var/opt/jfrog/artifactory"` | +| `artifactory.persistence.enabled` | Artifactory persistence volume enabled | `true` | +| `artifactory.persistence.existingClaim` | Artifactory persistence volume claim name | | +| `artifactory.persistence.accessMode` | Artifactory persistence volume access mode | `ReadWriteOnce` | +| `artifactory.persistence.size` | Artifactory persistence or local volume size | `20Gi` | +| `artifactory.persistence.binarystore.enabled` | whether you want to mount the binarystore.xml file from a secret created by the chart. If `false` you will need need to get the binarystore.xml file into the file-system from either an `initContainer` or using a `preStartCommand` | `true` | +| `artifactory.persistence.binarystoreXml` | Artifactory binarystore.xml template | See `values.yaml` | +| `artifactory.persistence.customBinarystoreXmlSecret` | A custom Secret for binarystore.xml | `` | +| `artifactory.persistence.maxCacheSize` | The maximum storage allocated for the cache in bytes. | `50000000000` | +| `artifactory.persistence.cacheProviderDir` | the root folder of binaries for the filestore cache. If the value specified starts with a forward slash ("/") it is considered the fully qualified path to the filestore folder. Otherwise, it is considered relative to the *baseDataDir*. 
| `cache` | +| `artifactory.persistence.type` | Artifactory HA storage type | `file-system` | +| `artifactory.persistence.redundancy` | Artifactory HA storage redundancy | `3` | +| `artifactory.persistence.nfs.ip` | NFS server IP | | +| `artifactory.persistence.nfs.haDataMount` | NFS data directory | `/data` | +| `artifactory.persistence.nfs.haBackupMount` | NFS backup directory | `/backup` | +| `artifactory.persistence.nfs.dataDir` | HA data directory | `/var/opt/jfrog/artifactory` | +| `artifactory.persistence.nfs.backupDir` | HA backup directory | `/var/opt/jfrog/artifactory-backup` | +| `artifactory.persistence.nfs.capacity` | NFS PVC size | `200Gi` | +| `artifactory.persistence.fileSystem.cache.enabled` | Enable Artifactory cache when using the file-system persistence type | `false` | +| `artifactory.persistence.eventual.numberOfThreads` | Eventual number of threads | `10` | +| `artifactory.persistence.googleStorage.endpoint` | Google Storage API endpoint| `storage.googleapis.com` | +| `artifactory.persistence.googleStorage.httpsOnly` | Google Storage API has to be consumed https only| `false` | +| `artifactory.persistence.googleStorage.bucketName` | Google Storage bucket name | `artifactory` | +| `artifactory.persistence.googleStorage.identity` | Google Storage service account id | | +| `artifactory.persistence.googleStorage.credential` | Google Storage service account key | | +| `artifactory.persistence.googleStorage.path` | Google Storage path in bucket | `artifactory/filestore` | +| `artifactory.persistence.googleStorage.bucketExists`| Google Storage bucket exists therefore does not need to be created.| `false` | +| `artifactory.persistence.awsS3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3.roleName` | AWS S3 IAM role name | | +| `artifactory.persistence.awsS3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3.properties` | AWS S3 additional properties | | +| `artifactory.persistence.awsS3.path` | AWS S3 path in bucket | `artifactory/filestore` | +| `artifactory.persistence.awsS3.refreshCredentials` | AWS S3 renew credentials on expiration | `true` (When roleName is used, this parameter will be set to true) | +| `artifactory.persistence.awsS3.httpsOnly` | AWS S3 https access to the bucket only | `true` | +| `artifactory.persistence.awsS3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3.s3AwsVersion` | AWS S3 signature version | `AWS4-HMAC-SHA256` | +| `artifactory.persistence.awsS3V3.testConnection` | AWS S3 test connection on start up | `false` | +| `artifactory.persistence.awsS3V3.identity` | AWS S3 AWS_ACCESS_KEY_ID | | +| `artifactory.persistence.awsS3V3.credential` | AWS S3 AWS_SECRET_ACCESS_KEY | | +| `artifactory.persistence.awsS3V3.region` | AWS S3 bucket region | | +| `artifactory.persistence.awsS3V3.bucketName` | AWS S3 bucket name | `artifactory-aws` | +| `artifactory.persistence.awsS3V3.path` | AWS S3 path in bucket | `artifactory/filestore` | +| `artifactory.persistence.awsS3V3.endpoint` | AWS S3 bucket endpoint | See https://docs.aws.amazon.com/general/latest/gr/rande.html | +| `artifactory.persistence.awsS3V3.maxConnections` | AWS S3 bucket maxConnections | `50` | +| 
`artifactory.persistence.awsS3V3.kmsServerSideEncryptionKeyId` | AWS S3 encryption key ID or alias | | +| `artifactory.persistence.awsS3V3.kmsKeyRegion` | AWS S3 KMS Key region | | +| `artifactory.persistence.awsS3V3.kmsCryptoMode` | AWS S3 KMS encryption mode | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate | +| `artifactory.persistence.awsS3V3.useInstanceCredentials` | AWS S3 Use default authentication mechanism | See https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-authentication | +| `artifactory.persistence.awsS3V3.usePresigning` | AWS S3 Use URL signing | `false` | +| `artifactory.persistence.awsS3V3.signatureExpirySeconds` | AWS S3 Validity period in seconds for signed URLs | `300` | +| `artifactory.persistence.awsS3V3.cloudFrontDomainName` | AWS CloudFront domain name | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontKeyPairId` | AWS CloudFront key pair ID | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.awsS3V3.cloudFrontPrivateKey` | AWS CloudFront private key | See https://www.jfrog.com/confluence/display/RTF/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-UsingCloudFront(Optional)| +| `artifactory.persistence.azureBlob.accountName` | Azure Blob Storage account name | `` | +| `artifactory.persistence.azureBlob.accountKey` | Azure Blob Storage account key | `` | +| `artifactory.persistence.azureBlob.endpoint` | Azure Blob Storage endpoint | `` | +| `artifactory.persistence.azureBlob.containerName` | Azure Blob Storage container name | `` | +| `artifactory.persistence.azureBlob.testConnection` | Azure Blob Storage test connection | `false` | +| `artifactory.resources.requests.memory` | Artifactory initial memory request | | +| `artifactory.resources.requests.cpu` | Artifactory initial cpu request | | +| `artifactory.resources.limits.memory` | Artifactory memory limit | | +| `artifactory.resources.limits.cpu` | Artifactory cpu limit | | +| `artifactory.javaOpts.xms` | Artifactory java Xms size | | +| `artifactory.javaOpts.xmx` | Artifactory java Xms size | | +| `artifactory.javaOpts.corePoolSize` | The number of async processes that can run in parallel - https://jfrog.com/knowledge-base/how-do-i-tune-artifactory-for-heavy-loads/ | `8` | +| `artifactory.javaOpts.jmx.enabled` | Enable JMX monitoring | `false` | +| `artifactory.javaOpts.jmx.port` | JMX Port number | `9010` | +| `artifactory.javaOpts.jmx.host` | JMX hostname (parsed as a helm template) | `{{ template "artifactory.fullname" $ }}` | +| `artifactory.javaOpts.jmx.ssl` | Enable SSL | `false` | +| `artifactory.javaOpts.jmx.authenticate` | Enable JMX authentication | `false` | +| `artifactory.javaOpts.jmx.accessFile` | The path to the JMX access file, when JMX authentication is enabled | | +| `artifactory.javaOpts.jmx.passwordFile` | The path to the JMX password file, when JMX authentication is enabled | | +| `artifactory.javaOpts.other` | Artifactory additional java options | | +| `artifactory.replicator.enabled` | Enable the Replicator service (relevant for Enterprise+ only) | `false` | +| `artifactory.ssh.enabled` | Enable Artifactory SSH access | | +| `artifactory.ssh.internalPort` | Artifactory SSH internal port | `1339` | +| 
`artifactory.ssh.externalPort` | Artifactory SSH external port | `1339` | +| `artifactory.terminationGracePeriodSeconds` | Termination grace period (seconds) | `30s` | +| `artifactory.tomcat.connector.maxThreads` | The max number of connections to Artifactory connector | `200` | +| `artifactory.tomcat.connector.extraConfig` | The max queue length for incoming connections to Artifactory connector | `'acceptCount="100"'` | +| `artifactory.systemYaml` | Artifactory system configuration (`system.yaml`) as described here - https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML | `see values.yaml` | +| `artifactory.affinity` | Artifactory node affinity | `{}` | +| `access.database.maxOpenConnections` | Maximum amount of open connections from Access to the DB | `80` | +| `access.tomcat.connector.maxThreads` | The max number of connections to Aceess connector | `50` | +| `access.tomcat.connector.extraConfig` | The max queue length for incoming connections to Access connector | `'acceptCount="100"'` | +| `ingress.enabled` | If true, Artifactory Ingress will be created | `false` | +| `ingress.annotations` | Artifactory Ingress annotations | `{}` | +| `ingress.labels` | Artifactory Ingress labels | `{}` | +| `ingress.hosts` | Artifactory Ingress hostnames | `[]` | +| `ingress.routerPath` | Router Ingress path | `/` | +| `ingress.artifactoryPath` | Artifactory Ingress path | `/` | +| `ingress.tls` | Artifactory Ingress TLS configuration (YAML) | `[]` | +| `ingress.defaultBackend.enabled` | If true, the default `backend` will be added using serviceName and servicePort | `true` | +| `ingress.annotations` | Ingress annotations, which are written out if annotations section exists in values. Everything inside of the annotations section will appear verbatim inside the resulting manifest. See `Ingress annotations` section below for examples of how to leverage the annotations, specifically for how to enable docker authentication. | | +| `ingress.additionalRules` | Ingress additional rules to be added to the Artifactory ingress. 
| `[]` | +| `metadata.database.maxOpenConnections` | Maximum amount of open connections from metadata to the DB | `80` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.enabled` | Deploy nginx server | `true` | +| `nginx.kind` | Nginx object kind, for example `DaemonSet`, `Deployment` or `StatefulSet` | `Deployment` | +| `nginx.name` | Nginx name | `nginx` | +| `nginx.replicaCount` | Nginx replica count | `1` | +| `nginx.uid` | Nginx User Id | `104` | +| `nginx.gid` | Nginx Group Id | `107` | +| `nginx.image.repository` | Container image | `docker.bintray.io/jfrog/nginx-artifactory-pro` | +| `nginx.image.version` | Container tag | `.Chart.AppVersion` | +| `nginx.image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nginx.labels` | Nginx deployment labels | `{}` | +| `nginx.loggers` | Nginx loggers (see values.yaml for possible values) | `[]` | +| `nginx.loggersResources.requests.memory` | Nginx logger initial memory request | | +| `nginx.loggersResources.requests.cpu` | Nginx logger initial cpu request | | +| `nginx.loggersResources.limits.memory` | Nginx logger memory limit | | +| `nginx.loggersResources.limits.cpu` | Nginx logger cpu limit | | +| `nginx.logs.stderr` | Send nginx logs to stderr | false | +| `nginx.logs.level` | Nginx log level: debug, info, notice, warn, error, crit, alert, or emerg | warn | +| `nginx.mainConf` | Content of the Artifactory nginx main nginx.conf config file | `see values.yaml` | +| `nginx.artifactoryConf` | Content of Artifactory nginx artifactory.conf config file | `see values.yaml` | +| `nginx.service.type`| Nginx service type | `LoadBalancer` | +| `nginx.service.loadBalancerSourceRanges`| Nginx service array of IP CIDR ranges to whitelist (only when service type is LoadBalancer) | | +| `nginx.service.externalTrafficPolicy`| Nginx service desires to route external traffic to node-local or cluster-wide endpoints. | `Cluster` | +| `nginx.service.ssloffload` | Nginx service SSL offload | false | +| `nginx.loadBalancerIP` | Provide Static IP to configure with Nginx | | +| `nginx.http.enabled` | Nginx http service enabled/disabled | true | +| `nginx.http.externalPort` | Nginx service external port | `80` | +| `nginx.http.internalPort` | Nginx service internal port | `80` | +| `nginx.https.enabled` | Nginx http service enabled/disabled | true | +| `nginx.https.externalPort` | Nginx service external port | `443` | +| `nginx.https.internalPort` | Nginx service internal port | `443` | +| `nginx.ssh.internalPort` | Nginx SSH internal port | `22` | +| `nginx.ssh.externalPort` | Nginx SSH external port | `22` | +| `nginx.externalPortHttp` | DEPRECATED: Nginx service external port | `80` | +| `nginx.internalPortHttp` | DEPRECATED:Nginx service internal port | `80` | +| `nginx.externalPortHttps` | DEPRECATED: Nginx service external port | `443` | +| `nginx.internalPortHttps` | DEPRECATED: Nginx service internal port | `443` | +| `nginx.livenessProbe.enabled` | Enable liveness probe | `true` | +| `nginx.livenessProbe.path` | Liveness probe HTTP Get path | `/router/api/v1/system/health` | +| `nginx.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 60 | +| `nginx.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.livenessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| 10 | +| `nginx.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 1| +| `nginx.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `nginx.readinessProbe.path` | Readiness probe HTTP Get path | `/artifactory/webapp/#/login` | +| `nginx.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 60 | +| `nginx.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `nginx.readinessProbe.timeoutSeconds` | When the probe times out | 10 | +| `nginx.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 10 | +| `nginx.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 1 | +| `nginx.tlsSecretName` | SSL secret that will be used by the Nginx pod | | +| `nginx.customConfigMap` | Nginx CustomeConfigMap name for `nginx.conf` | ` ` | +| `nginx.customArtifactoryConfigMap`| Nginx CustomeConfigMap name for `artifactory.conf` | ` ` | +| `nginx.persistence.mountPath` | Nginx persistence volume mount path | `"/var/opt/jfrog/nginx"` | +| `nginx.persistence.enabled` | Nginx persistence volume enabled | `false` | +| `nginx.persistence.accessMode` | Nginx persistence volume access mode | `ReadWriteOnce` | +| `nginx.persistence.size` | Nginx persistence volume size | `5Gi` | +| `nginx.resources.requests.memory` | Nginx initial memory request | | +| `nginx.resources.requests.cpu` | Nginx initial cpu request | | +| `nginx.resources.limits.memory` | Nginx memory limit | | +| `nginx.resources.limits.cpu` | Nginx cpu limit | | +| `waitForDatabase` | Wait for database (using wait-for-db init container) | `true` | +| `postgresql.enabled` | Use enclosed PostgreSQL as database | `true` | +| `postgresql.image.registry` | PostgreSQL image registry | `docker.bintray.io` | +| `postgresql.image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `postgresql.image.tag` | PostgreSQL image tag | `9.6.18-debian-10-r7` | +| `postgresql.postgresqlDatabase` | PostgreSQL database name | `artifactory` | +| `postgresql.postgresqlUsername` | PostgreSQL database user | `artifactory` | +| `postgresql.postgresqlPassword` | PostgreSQL database password | | +| `postgresql.postgresqlExtendedConf.listenAddresses` | PostgreSQL listen address | `"'*'"` | +| `postgresql.postgresqlExtendedConf.maxConnections` | PostgreSQL max_connections parameter | `1500` | +| `postgresql.persistence.enabled` | PostgreSQL use persistent storage | `true` | +| `postgresql.persistence.size` | PostgreSQL persistent storage size | `50Gi` | +| `postgresql.service.port` | PostgreSQL database port | `5432` | +| `postgresql.resources.requests.memory` | PostgreSQL initial memory request | | +| `postgresql.resources.requests.cpu` | PostgreSQL initial cpu request | | +| `postgresql.resources.limits.memory` | PostgreSQL memory limit | | +| `postgresql.resources.limits.cpu` | PostgreSQL cpu limit | | +| `postgresql.master.nodeSelector` | PostgreSQL master node selector | `{}` | +| `postgresql.master.affinity` | PostgreSQL master node affinity | `{}` | +| `postgresql.master.tolerations` | PostgreSQL master node tolerations | `[]` | +| `postgresql.slave.nodeSelector` | PostgreSQL slave node selector | `{}` | +| `postgresql.slave.affinity` | PostgreSQL slave node affinity | `{}` | +| `postgresql.slave.tolerations` | PostgreSQL slave node tolerations | `[]` | +| 
`database.type` | External database type (`postgresql`, `mysql`, `oracle` or `mssql`) | | +| `database.driver` | External database driver e.g. `org.postgresql.Driver` | | +| `database.url` | External database connection URL | | +| `database.user` | External database username | | +| `database.password` | External database password | | +| `database.secrets.user.name` | External database username `Secret` name | | +| `database.secrets.user.key` | External database username `Secret` key | | +| `database.secrets.password.name` | External database password `Secret` name | | +| `database.secrets.password.key` | External database password `Secret` key | | +| `database.secrets.url.name ` | External database url `Secret` name | | +| `database.secrets.url.key` | External database url `Secret` key | | +| `networkpolicy.name` | Becomes part of the NetworkPolicy object name | `artifactory` | +| `networkpolicy.podselector` | Contains the YAML that specifies how to match pods. Usually using matchLabels. | | +| `networkpolicy.ingress` | YAML snippet containing to & from rules applied to incoming traffic | `- {}` (open to all inbound traffic) | +| `networkpolicy.egress` | YAML snippet containing to & from rules applied to outgoing traffic | `- {}` (open to all outbound traffic) | +| `filebeat.enabled` | Enable a filebeat container to send your logs to a log management solution like ELK | `false` | +| `filebeat.name` | filebeat container name | `artifactory-filebeat` | +| `filebeat.image.repository` | filebeat Docker image repository | `docker.elastic.co/beats/filebeat` | +| `filebeat.image.version` | filebeat Docker image version | `7.5.1` | +| `filebeat.logstashUrl` | The URL to the central Logstash service, if you have one | `logstash:5044` | +| `filebeat.livenessProbe.exec.command` | liveness probe exec command | see [values.yaml](stable/artifactory/values.yaml) | +| `filebeat.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 180 | +| `filebeat.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.readinessProbe.exec.command` | readiness probe exec command | see [values.yaml](stable/artifactory/values.yaml) | +| `filebeat.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 10 | +| `filebeat.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 180 | +| `filebeat.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `filebeat.resources.requests.memory` | Filebeat initial memory request | | +| `filebeat.resources.requests.cpu` | Filebeat initial cpu request | | +| `filebeat.resources.limits.memory` | Filebeat memory limit | | +| `filebeat.resources.limits.cpu` | Filebeat cpu limit | | +| `filebeat.filebeatYml` | Filebeat yaml configuration file | see [values.yaml](stable/artifactory/values.yaml) | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
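+ +For example, the following is an illustrative sketch only; the release name, namespace and the chosen values are placeholders, while the parameter names come from the table above: + +```bash +helm upgrade --install artifactory --namespace artifactory --set postgresql.postgresqlPassword=<postgres-password> --set nginx.service.type=ClusterIP center/jfrog/artifactory +``` + +For a larger set of overrides, collecting them in a values file and passing it with `-f`, as in the earlier examples, is usually easier to maintain.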
+ +## Useful links +https://www.jfrog.com +https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ReverseProxyConfiguration.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ReverseProxyConfiguration.md new file mode 100644 index 000000000..38c4a9eaa --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ReverseProxyConfiguration.md @@ -0,0 +1,140 @@ +# JFrog Artifactory Reverse Proxy Settings using Nginx + +#### Reverse Proxy +* To use Artifactory as a Docker registry, a reverse proxy is mandatory. +* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate +the required configuration snippet, which you can then download and install directly in the corresponding directory of your reverse proxy server. +* To learn about configuring NGINX or Apache for reverse proxy, refer to the documentation on the [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy) +* By default the Artifactory helm chart uses Nginx for reverse proxy and load balancing. + +**Note**: The Nginx image distributed with the Artifactory helm chart is a custom image managed and maintained by JFrog. + +#### Features of Artifactory Nginx +* Provides a default configuration with a self-signed SSL certificate generated on each helm install/upgrade. +* Persists configuration and the SSL certificate in the `/var/opt/jfrog/nginx` directory + +#### Changing the default Artifactory nginx conf +Use a values.yaml file to change the value of `nginx.mainConf` or `nginx.artifactoryConf`. +This configuration will be mounted to the nginx container using a configMap. +For example: +1. Create a values file `nginx-values.yaml` with the following values: +```yaml +nginx: + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen {{ .Values.nginx.internalPortHttps }} ssl; + listen {{ .Values.nginx.internalPortHttp }} ; + ## Change to the DNS name you use to access Artifactory + server_name ~(?<repo>.+)\.{{ include "artifactory.fullname" . }} {{ include "artifactory.fullname" . }}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory.fullname" .
}}:{{ .Values.artifactory.externalPort }}/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } +``` + +2. Install/upgrade artifactory: +```bash +helm upgrade --install artifactory jfrog/artifactory -f nginx-values.yaml +``` + + +#### Steps to use a static configuration for reverse proxy in nginx +1. Get the Artifactory service name using this command: `kubectl get svc -n $NAMESPACE` + +2. Create an `artifactory.conf` file with the nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d) + + The following is an example `artifactory.conf` + + **Note**: + * Create the file with the name `artifactory.conf`, as this key is fixed in the configMap. + * Replace `artifactory-artifactory` with the service name taken from step 1. + + ```bash + ## add ssl entries when https has been set in config + ssl_certificate /var/opt/jfrog/nginx/ssl/tls.crt; + ssl_certificate_key /var/opt/jfrog/nginx/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen 443 ssl; + listen 80; + ## Change to the DNS name you use to access Artifactory + server_name ~(?<repo>.+)\.artifactory-artifactory artifactory-artifactory; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://artifactory-artifactory:8081/artifactory/$1 break; + } + proxy_pass http://artifactory-artifactory:8081/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + ``` + +3. Create a configMap from the `artifactory.conf` created in the step above. + ```bash + kubectl create configmap art-nginx-conf --from-file=artifactory.conf + ``` +4. Deploy Artifactory using the helm chart.
+ You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml) + + The following command sets the value at runtime: + ```bash + helm install --name artifactory --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory + ``` \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/UPGRADE_NOTES.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/UPGRADE_NOTES.md new file mode 100644 index 000000000..8917634f4 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/UPGRADE_NOTES.md @@ -0,0 +1,35 @@ +# JFrog Artifactory Chart Upgrade Notes +This file describes special upgrade notes needed at specific versions. + +## Upgrade from 8.X to 9.X (Chart Versions) + +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* To upgrade from a version prior to 8.x, you first need to upgrade to the latest version of 8.x as described in https://github.com/jfrog/charts/blob/master/stable/artifactory/CHANGELOG.md. + +## Upgrade from 7.X to 8.X (Chart Versions) +**DOWNTIME IS REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* The PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shut it down) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as the Artifactory filestore will persist across the upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. The old PostgreSQL will be removed and a new one deployed + a. You must pass an explicit "ready for upgrade" flag with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4. Once ready, open the Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry if you can't see the old config and files; it will all be restored by the system import in the next step + 5. Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved on the Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6.
Restore access to Artifactory + * Artifactory should now be ready to get back to normal operation diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/.helmignore b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/Chart.yaml new file mode 100644 index 000000000..a61a09ff7 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +appVersion: 11.7.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 8.7.3 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/README.md new file mode 100644 index 000000000..c2b848af1 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/README.md @@ -0,0 +1,576 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. 
The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form 
`ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. 
| `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| 
`slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally, it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slave replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, adjust the number of nodes in your PostgreSQL deployment through the `replication.slaveReplicas` parameter shown above. You can also use the `values-production.yaml` file or modify the other parameters listed above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are individual service objects for the master and slave types. These allow you to override the values in the top-level service object, so that the master and slave can use different service types and different clusterIPs / nodePorts. Also, if you want both the master and slave to be of type NodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the `master.service` or `slave.service` objects take precedence over the top-level service object, as in the sketch below.
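+A minimal sketch of such an override in a values file (the service types and port numbers here are purely illustrative, not chart defaults): + +```yaml +# Top-level service, used by both roles unless overridden below +service: +  type: ClusterIP +master: +  service: +    type: NodePort +    nodePort: 30432 +slave: +  service: +    type: NodePort +    nodePort: 30433 +``` +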
### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart, you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0` + +### postgresql.conf / pg_hba.conf files as configMap + +This Helm chart also supports customizing the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a configMap to the containers and used to configure the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only want to specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as a configMap to the containers, adding to or overwriting the default configuration through the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart can optionally start a metrics exporter for [Prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the Kubernetes cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows creating custom metrics from additional SQL queries; a minimal sketch of such a query definition follows. 
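+The snippet below is only a sketch of the custom-queries format consumed by the exporter (the metric name, query and columns are illustrative, not chart defaults): + +```yaml +metrics: +  enabled: true +  customMetrics: +    pg_database: +      query: "SELECT datname AS name, pg_database_size(datname) AS size_bytes FROM pg_database" +      metrics: +        - name: +            usage: "LABEL" +            description: "Name of the database" +        - size_bytes: +            usage: "GAUGE" +            description: "Size of the database in bytes" +``` +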
See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. 
The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. 
You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. You may see errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one. + +### 4.0.0 + +By default, this chart will use the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version, make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error: + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`; to do so, connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you will be prompted for a password; this is the password of the previous chart (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in the NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. + +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 000000000..347d3b40a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/README.md new file mode 100644 index 000000000..1813a2fea --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
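+For reference, instead of dropping files into `files/`, the same configuration can be expressed directly in a values file through the `postgresqlConfiguration` and `pgHbaConfiguration` chart parameters described earlier; the entries below are only a minimal, illustrative sketch, not chart defaults: + +```yaml +postgresqlConfiguration: +  maxConnections: "100"   # camelCase keys are rendered as snake_case settings, e.g. max_connections +  sharedBuffers: "500MB" +pgHbaConfiguration: |- +  local all all trust +  host  all all 0.0.0.0/0 md5 +``` +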
diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/conf.d/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/conf.d/README.md new file mode 100644 index 000000000..184c1875d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 000000000..cba38091e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/NOTES.txt b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/NOTES.txt new file mode 100644 index 000000000..3b5e6c60d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,60 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . 
}} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/_helpers.tpl b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 000000000..708434856 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,420 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) 
-}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. 
+*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/configmap.yaml new file mode 100644 index 000000000..d2178c077 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 000000000..8a4119578 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 000000000..8eb5e0588 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 000000000..524aa2f6a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 000000000..c610f09af --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 000000000..ea1fc9b3a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 000000000..44f1242dd --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/secrets.yaml new file mode 100644 index 000000000..094d18b49 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 000000000..27e5b516e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 000000000..f3a529a96 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 000000000..b6d607672 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,299 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{ if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 000000000..66eaa01d1 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,453 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | indent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/conf {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- tpl .Values.master.extraInitContainers . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-headless.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 000000000..5c71f468d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-read.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 000000000..92bdda80e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if $serviceAnnotations }} + annotations: {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc.yaml new file mode 100644 index 000000000..299e8d0b7 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if $serviceAnnotations }} + annotations: {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: master diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values-production.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values-production.yaml new file mode 100644 index 000000000..d34e326ee --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values-production.yaml @@ -0,0 +1,542 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r65 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
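+## A hedged example of the combination suggested above for clusters with dynamic
+## user IDs (for instance OpenShift SCCs): run the init container chown as "auto" and
+## relax the pod securityContext and the shmVolume chmod. The keys are the ones defined
+## in this file; turning them on like this is an illustrative assumption to adapt per
+## cluster, not a chart default.
+# volumePermissions:
+#   enabled: true
+#   securityContext:
+#     runAsUser: "auto"
+# securityContext:
+#   enabled: false
+# shmVolume:
+#   chmod:
+#     enabled: false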
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + 
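+  ## For example (illustrative only; the volume name and mount path are placeholders
+  ## and must match an entry under slave.extraVolumes below):
+  ## extraVolumeMounts:
+  ##   - name: extra-config
+  ##     mountPath: /opt/extra-config
+  ##     readOnly: true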
## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r72 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
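+  ## For example (illustrative secret name; the kubectl command below is standard
+  ## Kubernetes tooling, not something this chart runs for you):
+  ##   kubectl create secret docker-registry my-registry-key \
+  ##     --docker-server=registry.example.com --docker-username=me --docker-password=secret
+  ## and then reference it here:
+  ##   pullSecrets:
+  ##     - my-registry-key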
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.schema.json b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.schema.json new file mode 100644 index 000000000..ac2de6e94 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": 
"boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.yaml new file mode 100644 index 000000000..e14709a5e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/charts/postgresql/values.yaml @@ -0,0 +1,548 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r65 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on an initContainer. + ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: <storageClass> +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and when using one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slave StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + #
command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the NetworkPolicy's namespace + ## and that match the other criteria (i.e. carry the correct client label) can reach the DB. + ## If the DB should also be accessible to clients in other namespaces, use this + ## LabelSelector to select those namespaces; note that the NetworkPolicy's own namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just example rules; please adapt them to your needs. + ## Make sure to constrain the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
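The commented rule above only documents the expected shape of metrics.prometheusRule.rules; the exporter and its Prometheus Operator resources stay disabled by default. A minimal enablement sketch, assuming the same standalone Helm 3 install as in the earlier override example (every --set flag targets a key defined in this values.yaml; release and chart names remain placeholders):

helm upgrade --install my-postgres bitnami/postgresql \
  --set metrics.enabled=true \
  --set metrics.serviceMonitor.enabled=true \
  --set metrics.prometheusRule.enabled=true

List-valued settings such as metrics.prometheusRule.rules are easier to keep in an override file, as in the earlier sketch, than to pass via --set.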
+ rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r72 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/access-tls-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/access-tls-values.yaml new file mode 100644 index 000000000..21ffa5d6b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/access-tls-values.yaml @@ -0,0 +1,7 @@ +databaseUpgradeReady: true + +access: + accessConfig: + security: + tls: true + resetAccessCAKeys: true diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/default-values.yaml new file mode 100644 index 000000000..31e0908c2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/default-values.yaml @@ -0,0 +1,2 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
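The ci/*-values.yaml files added here exist so the chart can be rendered against several configurations in CI. A hedged sketch of exercising one of them locally with Helm 3, assuming the working directory mirrors the chart path used in this diff, the chart's dependencies are already vendored under its charts/ directory (as they are in this repository layout), and test-release is a placeholder name:

cd charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory
# Lint and render against a CI value file; rendered output is discarded, only errors matter
helm lint . -f ci/access-tls-values.yaml
helm template test-release . -f ci/access-tls-values.yaml >/dev/null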
+databaseUpgradeReady: true diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/migration-disabled-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/migration-disabled-values.yaml new file mode 100644 index 000000000..d3754dfab --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/migration-disabled-values.yaml @@ -0,0 +1,4 @@ +databaseUpgradeReady: true +artifactory: + migration: + enabled: false diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/test-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/test-values.yaml new file mode 100644 index 000000000..8adcd943f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/ci/test-values.yaml @@ -0,0 +1,13 @@ +databaseUpgradeReady: true +artifactory: + persistence: + enabled: true + +postgresql: + image: + tag: 9.6.18-debian-10-r7 + postgresqlPassword: password + postgresqlExtendedConf: + maxConnections: 102 + persistence: + enabled: true diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/files/migrate.sh b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/files/migrate.sh new file mode 100644 index 000000000..e35bfdbb2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/files/migrate.sh @@ -0,0 +1,4339 @@ +#!/bin/bash + +# Flags +FLAG_Y="y" +FLAG_N="n" +FLAGS_Y_N="$FLAG_Y $FLAG_N" +FLAG_NOT_APPLICABLE="_NA_" + +CURRENT_VERSION=$1 + +WRAPPER_SCRIPT_TYPE_RPMDEB="RPMDEB" +WRAPPER_SCRIPT_TYPE_DOCKER_COMPOSE="DOCKERCOMPOSE" + +SENSITIVE_KEY_VALUE="__sensitive_key_hidden___" + +# Shared system keys +SYS_KEY_SHARED_JFROGURL="shared.jfrogUrl" +SYS_KEY_SHARED_SECURITY_JOINKEY="shared.security.joinKey" +SYS_KEY_SHARED_SECURITY_MASTERKEY="shared.security.masterKey" + +SYS_KEY_SHARED_NODE_ID="shared.node.id" +SYS_KEY_SHARED_JAVAHOME="shared.javaHome" + +SYS_KEY_SHARED_DATABASE_TYPE="shared.database.type" +SYS_KEY_SHARED_DATABASE_TYPE_VALUE_POSTGRES="postgresql" +SYS_KEY_SHARED_DATABASE_DRIVER="shared.database.driver" +SYS_KEY_SHARED_DATABASE_URL="shared.database.url" +SYS_KEY_SHARED_DATABASE_USERNAME="shared.database.username" +SYS_KEY_SHARED_DATABASE_PASSWORD="shared.database.password" + +SYS_KEY_SHARED_ELASTICSEARCH_URL="shared.elasticsearch.url" +SYS_KEY_SHARED_ELASTICSEARCH_USERNAME="shared.elasticsearch.username" +SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD="shared.elasticsearch.password" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP="shared.elasticsearch.clusterSetup" +SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE="shared.elasticsearch.unicastFile" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP_VALUE="YES" + +# Define this in product specific script. Should contain the path to unitcast file +# File used by insight server to write cluster active nodes info. 
This will be read by elasticsearch +#SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE_VALUE="" + +SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME="shared.rabbitMq.active.node.name" +SYS_KEY_RABBITMQ_ACTIVE_NODE_IP="shared.rabbitMq.active.node.ip" + +# Filenames +FILE_NAME_SYSTEM_YAML="system.yaml" +FILE_NAME_JOIN_KEY="join.key" +FILE_NAME_MASTER_KEY="master.key" +FILE_NAME_INSTALLER_YAML="installer.yaml" + +# Global constants used in business logic +NODE_TYPE_STANDALONE="standalone" +NODE_TYPE_CLUSTER_NODE="node" +NODE_TYPE_DATABASE="database" + +# External(isable) databases +DATABASE_POSTGRES="POSTGRES" +DATABASE_ELASTICSEARCH="ELASTICSEARCH" +DATABASE_RABBITMQ="RABBITMQ" + +POSTGRES_LABEL="PostgreSQL" +ELASTICSEARCH_LABEL="Elasticsearch" +RABBITMQ_LABEL="Rabbitmq" + +ARTIFACTORY_LABEL="Artifactory" +JFMC_LABEL="Mission Control" +DISTRIBUTION_LABEL="Distribution" +XRAY_LABEL="Xray" + +POSTGRES_CONTAINER="postgres" +ELASTICSEARCH_CONTAINER="elasticsearch" +RABBITMQ_CONTAINER="rabbitmq" +REDIS_CONTAINER="redis" + +#Adding a small timeout before a read ensures it is positioned correctly in the screen +read_timeout=0.5 + +# Options related to data directory location +PROMPT_DATA_DIR_LOCATION="Installation Directory" +KEY_DATA_DIR_LOCATION="installer.data_dir" + +SYS_KEY_SHARED_NODE_HAENABLED="shared.node.haEnabled" +PROMPT_ADD_TO_CLUSTER="Are you adding an additional node to an existing product cluster?" +KEY_ADD_TO_CLUSTER="installer.ha" +VALID_VALUES_ADD_TO_CLUSTER="$FLAGS_Y_N" + +MESSAGE_POSTGRES_INSTALL="The installer can install a $POSTGRES_LABEL database, or you can connect to an existing compatible $POSTGRES_LABEL database\n(compatible databases: https://www.jfrog.com/confluence/display/JFROG/System+Requirements#SystemRequirements-RequirementsMatrix)" +PROMPT_POSTGRES_INSTALL="Do you want to install $POSTGRES_LABEL?" +KEY_POSTGRES_INSTALL="installer.install_postgresql" +VALID_VALUES_POSTGRES_INSTALL="$FLAGS_Y_N" + +# Postgres connection details +RPM_DEB_POSTGRES_HOME_DEFAULT="/var/opt/jfrog/postgres" +RPM_DEB_MESSAGE_STANDALONE_POSTGRES_DATA="$POSTGRES_LABEL home will have data and its configuration" +RPM_DEB_PROMPT_STANDALONE_POSTGRES_DATA="Type desired $POSTGRES_LABEL home location" +RPM_DEB_KEY_STANDALONE_POSTGRES_DATA="installer.postgresql.home" + +MESSAGE_DATABASE_URL="Provide the database connection details" +PROMPT_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://:/artifactory" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://:/mission_control?sslmode=disable" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://:/distribution?sslmode=disable" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://:/xraydb?sslmode=disable" + ;; + esac + if [ -z "$databaseURlExample" ]; then + echo -n "$POSTGRES_LABEL URL" # For consistency with username and password + return + fi + echo -n "$POSTGRES_LABEL url. 
Example: [$databaseURlExample]" +} +REGEX_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://.*/artifactory.*" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://.*/mission_control.*" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://.*/distribution.*" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://.*/xraydb.*" + ;; + esac + echo -n "^$databaseURlExample\$" +} +ERROR_MESSAGE_DATABASE_URL="Invalid $POSTGRES_LABEL URL" +KEY_DATABASE_URL="$SYS_KEY_SHARED_DATABASE_URL" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_USERNAME="$POSTGRES_LABEL username" +KEY_DATABASE_USERNAME="$SYS_KEY_SHARED_DATABASE_USERNAME" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_PASSWORD="$POSTGRES_LABEL password" +KEY_DATABASE_PASSWORD="$SYS_KEY_SHARED_DATABASE_PASSWORD" +IS_SENSITIVE_DATABASE_PASSWORD="$FLAG_Y" + +MESSAGE_STANDALONE_ELASTICSEARCH_INSTALL="The installer can install a $ELASTICSEARCH_LABEL database or you can connect to an existing compatible $ELASTICSEARCH_LABEL database" +PROMPT_STANDALONE_ELASTICSEARCH_INSTALL="Do you want to install $ELASTICSEARCH_LABEL?" +KEY_STANDALONE_ELASTICSEARCH_INSTALL="installer.install_elasticsearch" +VALID_VALUES_STANDALONE_ELASTICSEARCH_INSTALL="$FLAGS_Y_N" + +# Elasticsearch connection details +MESSAGE_ELASTICSEARCH_DETAILS="Provide the $ELASTICSEARCH_LABEL connection details" +PROMPT_ELASTICSEARCH_URL="$ELASTICSEARCH_LABEL URL" +KEY_ELASTICSEARCH_URL="$SYS_KEY_SHARED_ELASTICSEARCH_URL" + +PROMPT_ELASTICSEARCH_USERNAME="$ELASTICSEARCH_LABEL username" +KEY_ELASTICSEARCH_USERNAME="$SYS_KEY_SHARED_ELASTICSEARCH_USERNAME" + +PROMPT_ELASTICSEARCH_PASSWORD="$ELASTICSEARCH_LABEL password" +KEY_ELASTICSEARCH_PASSWORD="$SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD" +IS_SENSITIVE_ELASTICSEARCH_PASSWORD="$FLAG_Y" + +# Cluster related questions +MESSAGE_CLUSTER_MASTER_KEY="Provide the cluster's master key. It can be found in the data directory of the first node under /etc/security/master.key" +PROMPT_CLUSTER_MASTER_KEY="Master Key" +KEY_CLUSTER_MASTER_KEY="$SYS_KEY_SHARED_SECURITY_MASTERKEY" +IS_SENSITIVE_CLUSTER_MASTER_KEY="$FLAG_Y" + +MESSAGE_JOIN_KEY="The Join key is the secret key used to establish trust between services in the JFrog Platform.\n(You can copy the Join Key from Admin > Security > Settings)" +PROMPT_JOIN_KEY="Join Key" +KEY_JOIN_KEY="$SYS_KEY_SHARED_SECURITY_JOINKEY" +IS_SENSITIVE_JOIN_KEY="$FLAG_Y" +REGEX_JOIN_KEY="^[a-zA-Z0-9]{16,}\$" +ERROR_MESSAGE_JOIN_KEY="Invalid Join Key" + +# Rabbitmq related cluster information +MESSAGE_RABBITMQ_ACTIVE_NODE_NAME="Provide an active ${RABBITMQ_LABEL} node name. 
Run the command [ hostname -s ] on any of the existing nodes in the product cluster to get this" +PROMPT_RABBITMQ_ACTIVE_NODE_NAME="${RABBITMQ_LABEL} active node name" +KEY_RABBITMQ_ACTIVE_NODE_NAME="$SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME" + +# Rabbitmq related cluster information (necessary only for docker-compose) +PROMPT_RABBITMQ_ACTIVE_NODE_IP="${RABBITMQ_LABEL} active node ip" +KEY_RABBITMQ_ACTIVE_NODE_IP="$SYS_KEY_RABBITMQ_ACTIVE_NODE_IP" + +MESSAGE_JFROGURL(){ + echo -e "The JFrog URL allows ${PRODUCT_NAME} to connect to a JFrog Platform Instance.\n(You can copy the JFrog URL from Admin > Security > Settings)" +} +PROMPT_JFROGURL="JFrog URL" +KEY_JFROGURL="$SYS_KEY_SHARED_JFROGURL" +REGEX_JFROGURL="^https?://.*:{0,}[0-9]{0,4}\$" +ERROR_MESSAGE_JFROGURL="Invalid JFrog URL" + + +# Set this to FLAG_Y on upgrade +IS_UPGRADE="${FLAG_N}" + +# This belongs in JFMC but is the ONLY one that needs it so keeping it here for now. Can be made into a method and overridden if necessary +MESSAGE_MULTIPLE_PG_SCHEME="Please setup $POSTGRES_LABEL with schema as described in https://www.jfrog.com/confluence/display/JFROG/Installing+Mission+Control" + +_getMethodOutputOrVariableValue() { + unset EFFECTIVE_MESSAGE + local keyToSearch=$1 + local effectiveMessage= + local result="0" + # logSilly "Searching for method: [$keyToSearch]" + LC_ALL=C type "$keyToSearch" > /dev/null 2>&1 || result="$?" + if [[ "$result" == "0" ]]; then + # logSilly "Found method for [$keyToSearch]" + EFFECTIVE_MESSAGE="$($keyToSearch)" + return + fi + eval EFFECTIVE_MESSAGE=\${$keyToSearch} + if [ ! -z "$EFFECTIVE_MESSAGE" ]; then + return + fi + # logSilly "Didn't find method or variable for [$keyToSearch]" +} + + +# REF https://misc.flogisoft.com/bash/tip_colors_and_formatting +cClear="\e[0m" +cBlue="\e[38;5;69m" +cRedDull="\e[1;31m" +cYellow="\e[1;33m" +cRedBright="\e[38;5;197m" +cBold="\e[1m" + + +_loggerGetModeRaw() { + local MODE="$1" + case $MODE in + INFO) + printf "" + ;; + DEBUG) + printf "%s" "[${MODE}] " + ;; + WARN) + printf "${cRedDull}%s%s${cClear}" "[" "${MODE}" "] " + ;; + ERROR) + printf "${cRedBright}%s%s${cClear}" "[" "${MODE}" "] " + ;; + esac +} + + +_loggerGetMode() { + local MODE="$1" + case $MODE in + INFO) + printf "${cBlue}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + DEBUG) + printf "%-7s" "[${MODE}]" + ;; + WARN) + printf "${cRedDull}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + ERROR) + printf "${cRedBright}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + esac +} + +# Capitalises the first letter of the message +_loggerGetMessage() { + local originalMessage="$*" + local firstChar=$(echo "${originalMessage:0:1}" | awk '{ print toupper($0) }') + local resetOfMessage="${originalMessage:1}" + echo "$firstChar$resetOfMessage" +} + +# The spec also says content should be left-trimmed but this is not necessary in our case. We don't reach the limit. 
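A brief usage sketch of the formatting helpers above, assuming the script has been sourced into a shell; the message text is illustrative and the output shape is paraphrased from the functions rather than captured from a run:

# _loggerGetMode pads and colourises the level, _loggerGetMessage capitalises the first letter,
# so a console line can be assembled as:
printf '%s %s\n' "$(_loggerGetMode INFO)" "$(_loggerGetMessage 'migration helper loaded')"
# which prints a line of the form (plus colour escapes): [INFO ] Migration helper loaded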
+_loggerGetStackTrace() { + printf "%s%-30s%s" "[" "$1:$2" "]" +} + +_loggerGetThread() { + printf "%s" "[main]" +} + +_loggerGetServiceType() { + printf "%s%-5s%s" "[" "shell" "]" +} + +#Trace ID is not applicable to scripts +_loggerGetTraceID() { + printf "%s" "[]" +} + +logRaw() { + echo "" + printf "$1" + echo "" +} + +logBold(){ + echo "" + printf "${cBold}$1${cClear}" + echo "" +} + +# The date binary works differently based on whether it is GNU/BSD +is_date_supported=0 +date --version > /dev/null 2>&1 || is_date_supported=1 +IS_GNU=$(echo $is_date_supported) + +_loggerGetTimestamp() { + if [ "${IS_GNU}" == "0" ]; then + echo -n $(date -u +%FT%T.%3NZ) + else + echo -n $(date -u +%FT%T.000Z) + fi +} + +# https://www.shellscript.sh/tips/spinner/ +_spin() +{ + spinner="/|\\-/|\\-" + while : + do + for i in `seq 0 7` + do + echo -n "${spinner:$i:1}" + echo -en "\010" + sleep 1 + done + done +} + +showSpinner() { + # Start the Spinner: + _spin & + # Make a note of its Process ID (PID): + SPIN_PID=$! + # Kill the spinner on any signal, including our own exit. + trap "kill -9 $SPIN_PID" `seq 0 15` &> /dev/null || return 0 +} + +stopSpinner() { + local occurrences=$(ps -ef | grep -wc "${SPIN_PID}") + let "occurrences+=0" + # validate that it is present (2 since this search itself will show up in the results) + if [ $occurrences -gt 1 ]; then + kill -9 $SPIN_PID &>/dev/null || return 0 + wait $SPIN_ID &>/dev/null + fi +} + +_getEffectiveMessage(){ + local MESSAGE="$1" + local MODE=${2-"INFO"} + + if [ -z "$CONTEXT" ]; then + CONTEXT=$(caller) + fi + + _EFFECTIVE_MESSAGE= + if [ -z "$LOG_BEHAVIOR_ADD_META" ]; then + _EFFECTIVE_MESSAGE="$(_loggerGetModeRaw $MODE)$(_loggerGetMessage $MESSAGE)" + else + local SERVICE_TYPE="script" + local TRACE_ID="" + local THREAD="main" + + local CONTEXT_LINE=$(echo "$CONTEXT" | awk '{print $1}') + local CONTEXT_FILE=$(echo "$CONTEXT" | awk -F"/" '{print $NF}') + + _EFFECTIVE_MESSAGE="$(_loggerGetTimestamp) $(_loggerGetServiceType) $(_loggerGetMode $MODE) $(_loggerGetTraceID) $(_loggerGetStackTrace $CONTEXT_FILE $CONTEXT_LINE) $(_loggerGetThread) - $(_loggerGetMessage $MESSAGE)" + fi + CONTEXT= +} + +# Important - don't call any log method from this method. Will become an infinite loop. Use echo to debug +_logToFile() { + local MODE=${1-"INFO"} + local targetFile="$LOG_BEHAVIOR_ADD_REDIRECTION" + # IF the file isn't passed, abort + if [ -z "$targetFile" ]; then + return + fi + # IF this is not being run in verbose mode and mode is debug or lower, abort + if [ "${VERBOSE_MODE}" != "$FLAG_Y" ] && [ "${VERBOSE_MODE}" != "true" ] && [ "${VERBOSE_MODE}" != "debug" ]; then + if [ "$MODE" == "DEBUG" ] || [ "$MODE" == "SILLY" ]; then + return + fi + fi + + # Create the file if it doesn't exist + if [ ! 
-f "${targetFile}" ]; then + return + # touch $targetFile > /dev/null 2>&1 || true + fi + # # Make it readable + # chmod 640 $targetFile > /dev/null 2>&1 || true + + # Log contents + printf "%s\n" "$_EFFECTIVE_MESSAGE" >> "$targetFile" || true +} + +logger() { + if [ "$LOG_BEHAVIOR_ADD_NEW_LINE" == "$FLAG_Y" ]; then + echo "" + fi + _getEffectiveMessage "$@" + local MODE=${2-"INFO"} + printf "%s\n" "$_EFFECTIVE_MESSAGE" + _logToFile "$MODE" +} + +logDebug(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "$FLAG_Y" ] || [ "${VERBOSE_MODE}" == "true" ] || [ "${VERBOSE_MODE}" == "debug" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logSilly(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "silly" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logError() { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= +} + +errorExit () { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= + exit 1 +} + +warn () { + CONTEXT=$(caller) + logger "$1" "WARN" + CONTEXT= +} + +note () { + CONTEXT=$(caller) + logger "$1" "NOTE" + CONTEXT= +} + +bannerStart() { + title=$1 + echo + echo -e "\033[1m${title}\033[0m" + echo +} + +bannerSection() { + title=$1 + echo + echo -e "******************************** ${title} ********************************" + echo +} + +bannerSubSection() { + title=$1 + echo + echo -e "************** ${title} *******************" + echo +} + +bannerMessge() { + title=$1 + echo + echo -e "********************************" + echo -e "${title}" + echo -e "********************************" + echo +} + +setRed () { + local input="$1" + echo -e \\033[31m${input}\\033[0m +} +setGreen () { + local input="$1" + echo -e \\033[32m${input}\\033[0m +} +setYellow () { + local input="$1" + echo -e \\033[33m${input}\\033[0m +} + +logger_addLinebreak () { + echo -e "---\n" +} + +bannerImportant() { + title=$1 + local bold="\033[1m" + local noColour="\033[0m" + echo + echo -e "${bold}######################################## IMPORTANT ########################################${noColour}" + echo -e "${bold}${title}${noColour}" + echo -e "${bold}###########################################################################################${noColour}" + echo +} + +bannerEnd() { + #TODO pass a title and calculate length dynamically so that start and end look alike + echo + echo "*****************************************************************************" + echo +} + +banner() { + title=$1 + content=$2 + bannerStart "${title}" + echo -e "$content" +} + +# The logic below helps us redirect content we'd normally hide to the log file. + # + # We have several commands which clutter the console with output and so use + # `cmd > /dev/null` - this redirects the command's output to null. + # + # However, the information we just hid maybe useful for support. Using the code pattern + # `cmd >&6` (instead of `cmd> >/dev/null` ), the command's output is hidden from the console + # but redirected to the installation log file + # + +#Default value of 6 is just null +exec 6>>/dev/null +redirectLogsToFile() { + echo "" + # local file=$1 + + # [ ! -z "${file}" ] || return 0 + + # local logDir=$(dirname "$file") + + # if [ ! 
-f "${file}" ]; then + # [ -d "${logDir}" ] || mkdir -p ${logDir} || \ + # ( echo "WARNING : Could not create parent directory (${logDir}) to redirect console log : ${file}" ; return 0 ) + # fi + + # #6 now points to the log file + # exec 6>>${file} + # #reference https://unix.stackexchange.com/questions/145651/using-exec-and-tee-to-redirect-logs-to-stdout-and-a-log-file-in-the-same-time + # exec 2>&1 > >(tee -a "${file}") +} + +# Check if a give key contains any sensitive string as part of it +# Based on the result, the caller can decide its value can be displayed or not +# Sample usage : isKeySensitive "${key}" && displayValue="******" || displayValue=${value} +isKeySensitive(){ + local key=$1 + local sensitiveKeys="password|secret|key|token" + + if [ -z "${key}" ]; then + return 1 + else + local lowercaseKey=$(echo "${key}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + [[ "${lowercaseKey}" =~ ${sensitiveKeys} ]] && return 0 || return 1 + fi +} + +getPrintableValueOfKey(){ + local displayValue= + local key="$1" + if [ -z "$key" ]; then + # This is actually an incorrect usage of this method but any logging will cause unexpected content in the caller + echo -n "" + return + fi + + local value="$2" + isKeySensitive "${key}" && displayValue="$SENSITIVE_KEY_VALUE" || displayValue="${value}" + echo -n $displayValue +} + +_createConsoleLog(){ + if [ -z "${JF_PRODUCT_HOME}" ]; then + return + fi + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + mkdir -p "${JF_PRODUCT_HOME}/var/log" || true + if [ ! -f ${targetFile} ]; then + touch $targetFile > /dev/null 2>&1 || true + fi + chmod 640 $targetFile > /dev/null 2>&1 || true +} + +# Output from application's logs are piped to this method. It checks a configuration variable to determine if content should be logged to +# the common console.log file +redirectServiceLogsToFile() { + + local result="0" + # check if the function getSystemValue exists + LC_ALL=C type getSystemValue > /dev/null 2>&1 || result="$?" + if [[ "$result" != "0" ]]; then + warn "Couldn't find the systemYamlHelper. Skipping log redirection" + return 0 + fi + + getSystemValue "shared.consoleLog" "NOT_SET" + if [[ "${YAML_VALUE}" == "false" ]]; then + logger "Redirection is set to false. Skipping log redirection" + return 0; + fi + + if [ -z "${JF_PRODUCT_HOME}" ] || [ "${JF_PRODUCT_HOME}" == "" ]; then + warn "JF_PRODUCT_HOME is unavailable. 
Skipping log redirection" + return 0 + fi + + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + + _createConsoleLog + + while read -r line; do + printf '%s\n' "${line}" >> $targetFile || return 0 # Don't want to log anything - might clutter the screen + done +} + +## Display environment variables starting with JF_ along with its value +## Value of sensitive keys will be displayed as "******" +## +## Sample Display : +## +## ======================== +## JF Environment variables +## ======================== +## +## JF_SHARED_NODE_ID : locahost +## JF_SHARED_JOINKEY : ****** +## +## +displayEnv() { + local JFEnv=$(printenv | grep ^JF_ 2>/dev/null) + local key= + local value= + + if [ -z "${JFEnv}" ]; then + return + fi + + cat << ENV_START_MESSAGE + +======================== +JF Environment variables +======================== +ENV_START_MESSAGE + + for entry in ${JFEnv}; do + key=$(echo "${entry}" | awk -F'=' '{print $1}') + value=$(echo "${entry}" | awk -F'=' '{print $2}') + + isKeySensitive "${key}" && value="******" || value=${value} + + printf "\n%-35s%s" "${key}" " : ${value}" + done + echo; +} + +_addLogRotateConfiguration() { + logDebug "Method ${FUNCNAME[0]}" + # mandatory inputs + local confFile="$1" + local logFile="$2" + + # Method available in _ioOperations.sh + LC_ALL=C type io_setYQPath > /dev/null 2>&1 || return 1 + + io_setYQPath + + # Method available in _systemYamlHelper.sh + LC_ALL=C type getSystemValue > /dev/null 2>&1 || return 1 + + local frequency="daily" + local archiveFolder="archived" + + local compressLogFiles= + getSystemValue "shared.logging.rotation.compress" "true" + if [[ "${YAML_VALUE}" == "true" ]]; then + compressLogFiles="compress" + fi + + getSystemValue "shared.logging.rotation.maxFiles" "10" + local noOfBackupFiles="${YAML_VALUE}" + + getSystemValue "shared.logging.rotation.maxSizeMb" "25" + local sizeOfFile="${YAML_VALUE}M" + + logDebug "Adding logrotate configuration for [$logFile] to [$confFile]" + + # Add configuration to file + local confContent=$(cat << LOGROTATECONF +$logFile { + $frequency + missingok + rotate $noOfBackupFiles + $compressLogFiles + notifempty + olddir $archiveFolder + dateext + extension .log + dateformat -%Y-%m-%d + size ${sizeOfFile} +} +LOGROTATECONF +) + echo "${confContent}" > ${confFile} || return 1 +} + +_operationIsBySameUser() { + local targetUser="$1" + local currentUserID=$(id -u) + local currentUserName=$(id -un) + + if [ $currentUserID == $targetUser ] || [ $currentUserName == $targetUser ]; then + echo -n "yes" + else + echo -n "no" + fi +} + +_addCronJobForLogrotate() { + logDebug "Method ${FUNCNAME[0]}" + + # Abort if logrotate is not available + [ "$(io_commandExists 'crontab')" != "yes" ] && warn "cron is not available" && return 1 + + # mandatory inputs + local productHome="$1" + local confFile="$2" + local cronJobOwner="$3" + + # We want to use our binary if possible. It may be more recent than the one in the OS + local logrotateBinary="$productHome/app/third-party/logrotate/logrotate" + + if [ ! -f "$logrotateBinary" ]; then + logrotateBinary="logrotate" + [ "$(io_commandExists 'logrotate')" != "yes" ] && warn "logrotate is not available" && return 1 + fi + local cmd="$logrotateBinary ${confFile} --state $productHome/var/etc/logrotate/logrotate-state" #--verbose + + id -u $cronJobOwner > /dev/null 2>&1 || { warn "User $cronJobOwner does not exist. 
Aborting logrotate configuration" && return 1; } + + # Remove the existing line + removeLogRotation "$productHome" "$cronJobOwner" || true + + # Run logrotate daily at 23:55 hours + local cronInterval="55 23 * * * $cmd" + + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + # If this is standalone mode, we cannot use -u - the user running this process may not have the necessary privileges + if [ "$standaloneMode" == "no" ]; then + (crontab -l -u $cronJobOwner 2>/dev/null; echo "$cronInterval") | crontab -u $cronJobOwner - + else + (crontab -l 2>/dev/null; echo "$cronInterval") | crontab - + fi +} + +## Configure logrotate for a product +## Failure conditions: +## If logrotation could not be setup for some reason +## Parameters: +## $1: The product name +## $2: The product home +## Depends on global: none +## Updates global: none +## Returns: NA + +configureLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + + # mandatory inputs + local productName="$1" + if [ -z $productName ]; then + warn "Incorrect usage. A product name is necessary for configuring log rotation" && return 1 + fi + + local productHome="$2" + if [ -z $productHome ]; then + warn "Incorrect usage. A product home folder is necessary for configuring log rotation" && return 1 + fi + + local logFile="${productHome}/var/log/console.log" + if [[ $(uname) == "Darwin" ]]; then + logger "Log rotation for [$logFile] has not been configured. Please setup manually" + return 0 + fi + + local userID="$3" + if [ -z $userID ]; then + warn "Incorrect usage. A userID is necessary for configuring log rotation" && return 1 + fi + + local groupID=${4:-$userID} + local logConfigOwner=${5:-$userID} + + logDebug "Configuring log rotation as user [$userID], group [$groupID], effective cron User [$logConfigOwner]" + + local errorMessage="Could not configure logrotate. Please configure log rotation of the file: [$logFile] manually" + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + # TODO move to recursive method + createDir "${productHome}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log/archived" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + + # TODO move to recursive method + createDir "${productHome}/var/etc" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/etc/logrotate" "$logConfigOwner" || { warn "${errorMessage}" && return 1; } + + # conf file should be owned by the user running the script + createFile "${confFile}" "${logConfigOwner}" || { warn "Could not create configuration file [$confFile]" return 1; } + + _addLogRotateConfiguration "${confFile}" "${logFile}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + _addCronJobForLogrotate "${productHome}" "${confFile}" "${logConfigOwner}" || { warn "${errorMessage}" && return 1; } +} + +_pauseExecution() { + if [ "${VERBOSE_MODE}" == "debug" ]; then + + local breakPoint="$1" + if [ ! 
-z "$breakPoint" ]; then + printf "${cBlue}Breakpoint${cClear} [$breakPoint] " + echo "" + fi + printf "${cBlue}Press enter once you are ready to continue${cClear}" + read -s choice + echo "" + fi +} + +# removeLogRotation "$productHome" "$cronJobOwner" || true +removeLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + if [[ $(uname) == "Darwin" ]]; then + logDebug "Not implemented for Darwin." + return 0 + fi + local productHome="$1" + local cronJobOwner="$2" + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + if [ "$standaloneMode" == "no" ]; then + crontab -l -u $cronJobOwner 2>/dev/null | grep -v "$confFile" | crontab -u $cronJobOwner - + else + crontab -l 2>/dev/null | grep -v "$confFile" | crontab - + fi +} + +# NOTE: This method does not check the configuration to see if redirection is necessary. +# This is intentional. If we don't redirect, tomcat logs might get redirected to a folder/file +# that does not exist, causing the service itself to not start +setupTomcatRedirection() { + logDebug "Method ${FUNCNAME[0]}" + local consoleLog="${JF_PRODUCT_HOME}/var/log/console.log" + _createConsoleLog + export CATALINA_OUT="${consoleLog}" +} + +setupScriptLogsRedirection() { + logDebug "Method ${FUNCNAME[0]}" + if [ -z "${JF_PRODUCT_HOME}" ]; then + logDebug "No JF_PRODUCT_HOME. Returning" + return + fi + # Create the console.log file if it is not already present + # _createConsoleLog || true + # # Ensure any logs (logger/logError/warn) also get redirected to the console.log + # # Using installer.log as a temparory fix. Please change this to console.log once INST-291 is fixed + export LOG_BEHAVIOR_ADD_REDIRECTION="${JF_PRODUCT_HOME}/var/log/console.log" + export LOG_BEHAVIOR_ADD_META="$FLAG_Y" +} + +# Returns Y if this method is run inside a container +isRunningInsideAContainer() { + if [ -f "/.dockerenv" ]; then + echo -n "$FLAG_Y" + else + echo -n "$FLAG_N" + fi +} + +POSTGRES_USER=999 +NGINX_USER=104 +NGINX_GROUP=107 +ES_USER=1000 +REDIS_USER=999 +MONGO_USER=999 +RABBITMQ_USER=999 +LOG_FILE_PERMISSION=640 +PID_FILE_PERMISSION=644 + +# Copy file +copyFile(){ + local source=$1 + local target=$2 + local mode=${3:-overwrite} + local enableVerbose=${4:-"${FLAG_N}"} + local verboseFlag="" + + if [ ! -z "${enableVerbose}" ] && [ "${enableVerbose}" == "${FLAG_Y}" ]; then + verboseFlag="-v" + fi + + if [[ ! 
( $source && $target ) ]]; then + warn "Source and target is mandatory to copy file" + return 1 + fi + + if [[ -f "${target}" ]]; then + [[ "$mode" = "overwrite" ]] && ( cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}") || true + else + cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}" + fi +} + +# Copy files recursively from given source directory to destination directory +# This method wil copy but will NOT overwrite +# Destination will be created if its not available +copyFilesNoOverwrite(){ + local src=$1 + local dest=$2 + local enableVerboseCopy="${3:-${FLAG_Y}}" + + if [[ -z "${src}" || -z "${dest}" ]]; then + return + fi + + if [ -d "${src}" ] && [ "$(ls -A ${src})" ]; then + local relativeFilePath="" + local targetFilePath="" + + for file in $(find ${src} -type f 2>/dev/null) ; do + # Derive relative path and attach it to destination + # Example : + # src=/extra_config + # dest=/var/opt/jfrog/artifactory/etc + # file=/extra_config/config.xml + # relativeFilePath=config.xml + # targetFilePath=/var/opt/jfrog/artifactory/etc/config.xml + relativeFilePath=${file/${src}/} + targetFilePath=${dest}${relativeFilePath} + + createDir "$(dirname "$targetFilePath")" + copyFile "${file}" "${targetFilePath}" "no_overwrite" "${enableVerboseCopy}" + done + fi +} + +# TODO : WINDOWS ? +# Check the max open files and open processes set on the system +checkULimits () { + local minMaxOpenFiles=${1:-32000} + local minMaxOpenProcesses=${2:-1024} + local setValue=${3:-true} + local warningMsgForFiles=${4} + local warningMsgForProcesses=${5} + + logger "Checking open files and processes limits" + + local currentMaxOpenFiles=$(ulimit -n) + logger "Current max open files is $currentMaxOpenFiles" + if [ ${currentMaxOpenFiles} != "unlimited" ] && [ "$currentMaxOpenFiles" -lt "$minMaxOpenFiles" ]; then + if [ "${setValue}" ]; then + ulimit -n "${minMaxOpenFiles}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForFiles}" ] || warn "${warningMsgForFiles}" + else + errorExit "Max number of open files $currentMaxOpenFiles, is too low. Cannot run the application!" + fi + fi + + local currentMaxOpenProcesses=$(ulimit -u) + logger "Current max open processes is $currentMaxOpenProcesses" + if [ "$currentMaxOpenProcesses" != "unlimited" ] && [ "$currentMaxOpenProcesses" -lt "$minMaxOpenProcesses" ]; then + if [ "${setValue}" ]; then + ulimit -u "${minMaxOpenProcesses}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForProcesses}" ] || warn "${warningMsgForProcesses}" + else + errorExit "Max number of open files $currentMaxOpenProcesses, is too low. Cannot run the application!" + fi + fi +} + +createDirs() { + local appDataDir=$1 + local serviceName=$2 + local folders="backup bootstrap data etc logs work" + + [ -z "${appDataDir}" ] && errorExit "An application directory is mandatory to create its data structure" || true + [ -z "${serviceName}" ] && errorExit "A service name is mandatory to create service data structure" || true + + for folder in ${folders} + do + folder=${appDataDir}/${folder}/${serviceName} + if [ ! 
-d "${folder}" ]; then + logger "Creating folder : ${folder}" + mkdir -p "${folder}" || errorExit "Failed to create ${folder}" + fi + done +} + + +testReadWritePermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local test_file=${dir_to_check}/test-permissions + + # Write file + if echo test > ${test_file} 1> /dev/null 2>&1; then + # Write succeeded. Testing read... + if cat ${test_file} > /dev/null; then + rm -f ${test_file} + else + error=true + fi + else + error=true + fi + + if [ ${error} == true ]; then + return 1 + else + return 0 + fi +} + +# Test directory has read/write permissions for current user +testDirectoryPermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local u_id=$(id -u) + local id_str="id ${u_id}" + + logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}" + + if ! testReadWritePermissions ${dir_to_check}; then + error=true + fi + + if [ "${error}" == true ]; then + local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check}) + logger "###########################################################" + logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}" + logger "${stat_data}" + logger "Mounted directory must have read/write permissions for user ${id_str}" + logger "###########################################################" + errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}" + fi + logger "Permissions for ${dir_to_check} are good" +} + +# Utility method to create a directory path recursively with chown feature as +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: Root directory from where the path can be created +## $2: List of recursive child directories seperated by space +## $3: user who should own the directory. Optional +## $4: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA +# +# Usage: +# createRecursiveDir "/opt/jfrog/product/var" "bootstrap tomcat lib" "user_name" "group_name" +createRecursiveDir(){ + local rootDir=$1 + local pathDirs=$2 + local user=$3 + local group=${4:-${user}} + local fullPath= + + [ ! -z "${rootDir}" ] || return 0 + + createDir "${rootDir}" "${user}" "${group}" + + [ ! -z "${pathDirs}" ] || return 0 + + fullPath=${rootDir} + + for dir in ${pathDirs}; do + fullPath=${fullPath}/${dir} + createDir "${fullPath}" "${user}" "${group}" + done +} + +# Utility method to create a directory +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: directory to create +## $2: user who should own the directory. Optional +## $3: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA + +createDir(){ + local dirName="$1" + local printMessage=no + logSilly "Method ${FUNCNAME[0]} invoked with [$dirName]" + [ -z "${dirName}" ] && return + + logDebug "Attempting to create ${dirName}" + mkdir -p "${dirName}" || errorExit "Unable to create directory: [${dirName}]" + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + # Earlier, this line would have returned 1 if it failed. Now it just warns. + # This is intentional. 
Earlier, this line would NOT be reached if the folder already existed. + # Since it will always come to this line and the script may be running as a non-root user, this method will just warn if + # setting permissions fails (so as to not affect any existing flows) + io_setOwnershipNonRecursive "$dirName" "$userID" "$groupID" || warn "Could not set owner of [$dirName] to [$userID:$groupID]" + fi + # logging message to print created dir with user and group + local logMessage=${4:-$printMessage} + if [[ "${logMessage}" == "yes" ]]; then + logger "Successfully created directory [${dirName}]. Owner: [${userID}:${groupID}]" + fi +} + +removeSoftLinkAndCreateDir () { + local dirName="$1" + local userID="$2" + local groupID="$3" + local logMessage="$4" + removeSoftLink "${dirName}" + createDir "${dirName}" "${userID}" "${groupID}" "${logMessage}" +} + +# Utility method to remove a soft link +removeSoftLink () { + local dirName="$1" + if [[ -L "${dirName}" ]]; then + targetLink=$(readlink -f "${dirName}") + logger "Removing the symlink [${dirName}] pointing to [${targetLink}]" + rm -f "${dirName}" + fi +} + +# Check Directory exist in the path +checkDirExists () { + local directoryPath="$1" + + [[ -d "${directoryPath}" ]] && echo -n "true" || echo -n "false" +} + + +# Utility method to create a file +# Failure conditions: +# Parameters: +## $1: file to create +# Depends on global: none +# Updates global: none +# Returns: NA + +createFile(){ + local fileName="$1" + logSilly "Method ${FUNCNAME[0]} [$fileName]" + [ -f "${fileName}" ] && return 0 + touch "${fileName}" || return 1 + + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + io_setOwnership "$fileName" "$userID" "$groupID" || return 1 + fi +} + +# Check File exist in the filePath +# IMPORTANT- DON'T ADD LOGGING to this method +checkFileExists () { + local filePath="$1" + + [[ -f "${filePath}" ]] && echo -n "true" || echo -n "false" +} + +# Check for directories contains any (files or sub directories) +# IMPORTANT- DON'T ADD LOGGING to this method +checkDirContents () { + local directoryPath="$1" + if [[ "$(ls -1 "${directoryPath}" | wc -l)" -gt 0 ]]; then + echo -n "true" + else + echo -n "false" + fi +} + +# Check contents exist in directory +# IMPORTANT- DON'T ADD LOGGING to this method +checkContentExists () { + local source="$1" + + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + echo -n "false" + else + echo -n "true" + fi +} + +# Resolve the variable +# IMPORTANT- DON'T ADD LOGGING to this method +evalVariable () { + local output="$1" + local input="$2" + + eval "${output}"=\${"${input}"} + eval echo \${"${output}"} +} + +# Usage: if [ "$(io_commandExists 'curl')" == "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_commandExists() { + local commandToExecute="$1" + hash "${commandToExecute}" 2>/dev/null + local rt=$? 
+ if [ "$rt" == 0 ]; then echo -n "yes"; else echo -n "no"; fi +} + +# Usage: if [ "$(io_curlExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_curlExists() { + io_commandExists "curl" +} + + +io_hasMatch() { + logSilly "Method ${FUNCNAME[0]}" + local result=0 + logDebug "Executing [echo \"$1\" | grep \"$2\" >/dev/null 2>&1]" + echo "$1" | grep "$2" >/dev/null 2>&1 || result=1 + return $result +} + +# Utility method to check if the string passed (usually a connection url) corresponds to this machine itself +# Failure conditions: None +# Parameters: +## $1: string to check against +# Depends on global: none +# Updates global: IS_LOCALHOST with value "yes/no" +# Returns: NA + +io_getIsLocalhost() { + logSilly "Method ${FUNCNAME[0]}" + IS_LOCALHOST="$FLAG_N" + local inputString="$1" + logDebug "Parsing [$inputString] to check if we are dealing with this machine itself" + + io_hasMatch "$inputString" "localhost" && { + logDebug "Found localhost. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for localhost" + + local hostIP=$(io_getPublicHostIP) + io_hasMatch "$inputString" "$hostIP" && { + logDebug "Found $hostIP. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostIP" + + local hostID=$(io_getPublicHostID) + io_hasMatch "$inputString" "$hostID" && { + logDebug "Found $hostID. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostID" + + local hostName=$(io_getPublicHostName) + io_hasMatch "$inputString" "$hostName" && { + logDebug "Found $hostName. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostName" + +} + +# Usage: if [ "$(io_tarExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_tarExists() { + io_commandExists "tar" +} + +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostIP() { + local OS_TYPE=$(uname) + local publicHostIP= + if [ "${OS_TYPE}" == "Darwin" ]; then + ipStatus=$(ifconfig en0 | grep "status" | awk '{print$2}') + if [ "${ipStatus}" == "active" ]; then + publicHostIP=$(ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}') + else + errorExit "Host IP could not be resolved!" + fi + elif [ "${OS_TYPE}" == "Linux" ]; then + publicHostIP=$(hostname -i 2>/dev/null || echo "127.0.0.1") + fi + publicHostIP=$(echo "${publicHostIP}" | awk '{print $1}') + echo -n "${publicHostIP}" +} + +# Will return the short host name (up to the first dot) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostName() { + echo -n "$(hostname -s)" +} + +# Will return the full host name (use this as much as possible) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostID() { + echo -n "$(hostname)" +} + +# Utility method to backup a file +# Failure conditions: NA +# Parameters: filePath +# Depends on global: none, +# Updates global: none +# Returns: NA +io_backupFile() { + logSilly "Method ${FUNCNAME[0]}" + fileName="$1" + if [ ! 
-f "${filePath}" ]; then + logDebug "No file: [${filePath}] to backup" + return + fi + dateTime=$(date +"%Y-%m-%d-%H-%M-%S") + targetFileName="${fileName}.backup.${dateTime}" + yes | \cp -f "$fileName" "${targetFileName}" + logger "File [${fileName}] backedup as [${targetFileName}]" +} + +# Reference https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash/4025065#4025065 +is_number() { + case "$BASH_VERSION" in + 3.1.*) + PATTERN='\^\[0-9\]+\$' + ;; + *) + PATTERN='^[0-9]+$' + ;; + esac + + [[ "$1" =~ $PATTERN ]] +} + +io_compareVersions() { + if [[ $# != 2 ]] + then + echo "Usage: min_version current minimum" + return + fi + + A="${1%%.*}" + B="${2%%.*}" + + if [[ "$A" != "$1" && "$B" != "$2" && "$A" == "$B" ]] + then + io_compareVersions "${1#*.}" "${2#*.}" + else + if is_number "$A" && is_number "$B" + then + if [[ "$A" -eq "$B" ]]; then + echo "0" + elif [[ "$A" -gt "$B" ]]; then + echo "1" + elif [[ "$A" -lt "$B" ]]; then + echo "-1" + fi + fi + fi +} + +# Reference https://stackoverflow.com/questions/369758/how-to-trim-whitespace-from-a-bash-variable +# Strip all leading and trailing spaces +# IMPORTANT- DON'T ADD LOGGING to this method +io_trim() { + local var="$1" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +# temporary function will be removing it ASAP +# search for string and replace text in file +replaceText_migration_hook () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + fi +} + +# search for string and replace text in file +replaceText () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + logDebug "Replaced [$regexString] with [$replaceText] in [$file]" + fi +} + +# search for string and prepend text in file +prependText () { + local regexString="$1" + local text="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + else + sed -i -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + fi +} + +# add text to beginning of the file +addText () { + local text="$1" + local file="$2" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + else + sed -i -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + fi +} + +io_replaceString () { + local value="$1" + local firstString="$2" + local secondString="$3" + local separator=${4:-"/"} + local updateValue= + if [[ 
$(uname) == "Darwin" ]]; then + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + else + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + fi + echo -n "${updateValue}" +} + +_findYQ() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + local parentDir="$1" + if [ -z "$parentDir" ]; then + return + fi + logDebug "Executing command [find "${parentDir}" -name third-party -type d]" + local yq=$(find "${parentDir}" -name third-party -type d) + if [ -d "${yq}/yq" ]; then + export YQ_PATH="${yq}/yq" + fi +} + + +io_setYQPath() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + if [ "$(io_commandExists 'yq')" == "yes" ]; then + return + fi + + if [ ! -z "${JF_PRODUCT_HOME}" ] && [ -d "${JF_PRODUCT_HOME}" ]; then + _findYQ "${JF_PRODUCT_HOME}" + fi + + if [ -z "${YQ_PATH}" ] && [ ! -z "${COMPOSE_HOME}" ] && [ -d "${COMPOSE_HOME}" ]; then + _findYQ "${COMPOSE_HOME}" + fi + # TODO We can remove this block after all the code is restructured. + if [ -z "${YQ_PATH}" ] && [ ! -z "${SCRIPT_HOME}" ] && [ -d "${SCRIPT_HOME}" ]; then + _findYQ "${SCRIPT_HOME}" + fi + +} + +io_getLinuxDistribution() { + LINUX_DISTRIBUTION= + + # Make sure running on Linux + [ $(uname -s) != "Linux" ] && return + + # Find out what Linux distribution we are on + + cat /etc/*-release | grep -i Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 6.x + cat /etc/issue.net | grep Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 7.x + cat /etc/*-release | grep -i centos >/dev/null 2>&1 && LINUX_DISTRIBUTION=CentOS && LINUX_DISTRIBUTION_VER="7" || true + + # OS 8.x + grep -q -i "release 8" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="8" || true + + # OS 7.x + grep -q -i "release 7" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="7" || true + + # OS 6.x + grep -q -i "release 6" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="6" || true + + cat /etc/*-release | grep -i Red | grep -i 'VERSION=7' >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat && LINUX_DISTRIBUTION_VER="7" || true + + cat /etc/*-release | grep -i debian >/dev/null 2>&1 && LINUX_DISTRIBUTION=Debian || true + + cat /etc/*-release | grep -i ubuntu >/dev/null 2>&1 && LINUX_DISTRIBUTION=Ubuntu || true +} + +## Utility method to check ownership of folders/files +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If file is not owned by the user & group +## Parameters: + ## user + ## group + ## folder to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac +io_checkOwner () { + logSilly "Method ${FUNCNAME[0]}" + local osType=$(uname) + + if [ "${osType}" != "Linux" ]; then + logDebug "Unsupported OS. Skipping check" + return 0 + fi + + local file_to_check=$1 + local user_id_to_check=$2 + + + if [ -z "$user_id_to_check" ] || [ -z "$file_to_check" ]; then + errorExit "Invalid invocation of method. 
Missing mandatory inputs" + fi + + local group_id_to_check=${3:-$user_id_to_check} + local check_user_name=${4:-"no"} + + logDebug "Checking permissions on [$file_to_check] for user [$user_id_to_check] & group [$group_id_to_check]" + + local stat= + + if [ "${check_user_name}" == "yes" ]; then + stat=( $(stat -Lc "%U %G" ${file_to_check}) ) + else + stat=( $(stat -Lc "%u %g" ${file_to_check}) ) + fi + + local user_id=${stat[0]} + local group_id=${stat[1]} + + if [[ "${user_id}" != "${user_id_to_check}" ]] || [[ "${group_id}" != "${group_id_to_check}" ]] ; then + logDebug "Ownership mismatch. [${file_to_check}] is not owned by [${user_id_to_check}:${group_id_to_check}]" + return 1 + else + return 0 + fi +} + +## Utility method to change ownership of a file/folder - NON recursive +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnershipNonRecursive() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown ${user}:${group} ${targetFile}]" + chown ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to change ownership of a file. +## IMPORTANT +## If being called on a folder, should ONLY be called for fresh folders or may cause performance issues +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnership() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown -R ${user}:${group} ${targetFile}]" + chown -R ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to create third party folder structure necessary for Postgres +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## POSTGRESQL_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createPostgresDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${POSTGRESQL_DATA_ROOT}" ] && return 0 + + logDebug "Property [${POSTGRESQL_DATA_ROOT}] exists. Proceeding" + + createDir "${POSTGRESQL_DATA_ROOT}/data" + io_setOwnership "${POSTGRESQL_DATA_ROOT}" "${POSTGRES_USER}" "${POSTGRES_USER}" || errorExit "Setting ownership of [${POSTGRESQL_DATA_ROOT}] to [${POSTGRES_USER}:${POSTGRES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Nginx +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## NGINX_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createNginxDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${NGINX_DATA_ROOT}" ] && return 0 + + logDebug "Property [${NGINX_DATA_ROOT}] exists. 
Proceeding" + + createDir "${NGINX_DATA_ROOT}" + io_setOwnership "${NGINX_DATA_ROOT}" "${NGINX_USER}" "${NGINX_GROUP}" || errorExit "Setting ownership of [${NGINX_DATA_ROOT}] to [${NGINX_USER}:${NGINX_GROUP}] failed" +} + +## Utility method to create third party folder structure necessary for ElasticSearch +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## ELASTIC_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createElasticSearchDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${ELASTIC_DATA_ROOT}" ] && return 0 + + logDebug "Property [${ELASTIC_DATA_ROOT}] exists. Proceeding" + + createDir "${ELASTIC_DATA_ROOT}/data" + io_setOwnership "${ELASTIC_DATA_ROOT}" "${ES_USER}" "${ES_USER}" || errorExit "Setting ownership of [${ELASTIC_DATA_ROOT}] to [${ES_USER}:${ES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Redis +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## REDIS_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRedisDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${REDIS_DATA_ROOT}" ] && return 0 + + logDebug "Property [${REDIS_DATA_ROOT}] exists. Proceeding" + + createDir "${REDIS_DATA_ROOT}" + io_setOwnership "${REDIS_DATA_ROOT}" "${REDIS_USER}" "${REDIS_USER}" || errorExit "Setting ownership of [${REDIS_DATA_ROOT}] to [${REDIS_USER}:${REDIS_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Mongo +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## MONGODB_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createMongoDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${MONGODB_DATA_ROOT}" ] && return 0 + + logDebug "Property [${MONGODB_DATA_ROOT}] exists. Proceeding" + + createDir "${MONGODB_DATA_ROOT}/logs" + createDir "${MONGODB_DATA_ROOT}/configdb" + createDir "${MONGODB_DATA_ROOT}/db" + io_setOwnership "${MONGODB_DATA_ROOT}" "${MONGO_USER}" "${MONGO_USER}" || errorExit "Setting ownership of [${MONGODB_DATA_ROOT}] to [${MONGO_USER}:${MONGO_USER}] failed" +} + +## Utility method to create third party folder structure necessary for RabbitMQ +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## RABBITMQ_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRabbitMQDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${RABBITMQ_DATA_ROOT}" ] && return 0 + + logDebug "Property [${RABBITMQ_DATA_ROOT}] exists. 
Proceeding" + + createDir "${RABBITMQ_DATA_ROOT}" + io_setOwnership "${RABBITMQ_DATA_ROOT}" "${RABBITMQ_USER}" "${RABBITMQ_USER}" || errorExit "Setting ownership of [${RABBITMQ_DATA_ROOT}] to [${RABBITMQ_USER}:${RABBITMQ_USER}] failed" +} + +# Add or replace a property in provided properties file +addOrReplaceProperty() { + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + local delimiter=${4:-"="} + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}\s*${delimiter}.*$" ${propertiesPath} > /dev/null 2>&1 + [ $? -ne 0 ] && echo -e "\n${propertyName}${delimiter}${propertyValue}" >> ${propertiesPath} + sed -i -e "s|^${propertyName}\s*${delimiter}.*$|${propertyName}${delimiter}${propertyValue}|g;" ${propertiesPath} +} + +# Set property only if its not set +io_setPropertyNoOverride(){ + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}:" ${propertiesPath} > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${propertyName}: ${propertyValue}" >> ${propertiesPath} || warn "Setting property ${propertyName}: ${propertyValue} in [ ${propertiesPath} ] failed" + else + logger "Skipping update of property : ${propertyName}" >&6 + fi +} + +# Add a line to a file if it doesn't already exist +addLine() { + local line_to_add=$1 + local target_file=$2 + logger "Trying to add line $1 to $2" >&6 2>&1 + cat "$target_file" | grep -F "$line_to_add" -wq >&6 2>&1 + if [ $? != 0 ]; then + logger "Line does not exist and will be added" >&6 2>&1 + echo $line_to_add >> $target_file || errorExit "Could not update $target_file" + fi +} + +# Utility method to check if a value (first paramter) exists in an array (2nd parameter) +# 1st parameter "value to find" +# 2nd parameter "The array to search in. Please pass a string with each value separated by space" +# Example: containsElement "y" "y Y n N" +containsElement () { + local searchElement=$1 + local searchArray=($2) + local found=1 + for elementInIndex in "${searchArray[@]}";do + if [[ $elementInIndex == $searchElement ]]; then + found=0 + fi + done + return $found +} + +# Utility method to get user's choice +# 1st parameter "what to ask the user" +# 2nd parameter "what choices to accept, separated by spaces" +# 3rd parameter "what is the default choice (to use if the user simply presses Enter)" +# Example 'getUserChoice "Are you feeling lucky? Punk!" "y n Y N" "y"' +getUserChoice(){ + configureLogOutput + read_timeout=${read_timeout:-0.5} + local choice="na" + local text_to_display=$1 + local choices=$2 + local default_choice=$3 + users_choice= + + until containsElement "$choice" "$choices"; do + echo "";echo ""; + sleep $read_timeout #This ensures correct placement of the question. 
+ read -p "$text_to_display :" choice + : ${choice:=$default_choice} + done + users_choice=$choice + echo -e "\n$text_to_display: $users_choice" >&6 + sleep $read_timeout #This ensures correct logging +} + +setFilePermission () { + local permission=$1 + local file=$2 + chmod "${permission}" "${file}" || warn "Setting permission ${permission} to file [ ${file} ] failed" +} + + +#setting required paths +setAppDir (){ + SCRIPT_DIR=$(dirname $0) + SCRIPT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + APP_DIR="`cd "${SCRIPT_HOME}";pwd`" +} + +ZIP_TYPE="zip" +COMPOSE_TYPE="compose" +HELM_TYPE="helm" +RPM_TYPE="rpm" +DEB_TYPE="debian" + +sourceScript () { + local file="$1" + + [ ! -z "${file}" ] || errorExit "target file is not passed to source a file" + + if [ ! -f "${file}" ]; then + errorExit "${file} file is not found" + else + source "${file}" || errorExit "Unable to source ${file}, please check if the user ${USER} has permissions to perform this action" + fi +} +# Source required helpers +initHelpers () { + local systemYamlHelper="${APP_DIR}/systemYamlHelper.sh" + local thirdPartyDir=$(find ${APP_DIR}/.. -name third-party -type d) + export YQ_PATH="${thirdPartyDir}/yq" + LIBXML2_PATH="${thirdPartyDir}/libxml2/bin/xmllint" + export LD_LIBRARY_PATH="${thirdPartyDir}/libxml2/lib" + sourceScript "${systemYamlHelper}" +} +# Check migration info yaml file available in the path +checkMigrationInfoYaml () { + + if [[ -f "${APP_DIR}/migrationHelmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationHelmInfo.yaml" + INSTALLER="${HELM_TYPE}" + elif [[ -f "${APP_DIR}/migrationZipInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationZipInfo.yaml" + INSTALLER="${ZIP_TYPE}" + elif [[ -f "${APP_DIR}/migrationRpmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationRpmInfo.yaml" + INSTALLER="${RPM_TYPE}" + elif [[ -f "${APP_DIR}/migrationDebInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationDebInfo.yaml" + INSTALLER="${DEB_TYPE}" + elif [[ -f "${APP_DIR}/migrationComposeInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationComposeInfo.yaml" + INSTALLER="${COMPOSE_TYPE}" + else + errorExit "File migration Info yaml does not exist in [${APP_DIR}]" + fi +} + +retrieveYamlValue () { + local yamlPath="$1" + local value="$2" + local output="$3" + local message="$4" + + [[ -z "${yamlPath}" ]] && errorExit "yamlPath is mandatory to get value from ${MIGRATION_SYSTEM_YAML_INFO}" + + getYamlValue "${yamlPath}" "${MIGRATION_SYSTEM_YAML_INFO}" "false" + value="${YAML_VALUE}" + if [[ -z "${value}" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "Empty value for ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + elif [[ "${output}" == "Skip" ]]; then + return + else + errorExit "${message}" + fi + fi +} + +checkEnv () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + # check Environment JF_PRODUCT_HOME is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_PRODUCT_HOME")" + if [[ -z "${NEW_DATA_DIR}" ]]; then + errorExit "Environment variable JF_PRODUCT_HOME is not set, this is required to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + getCustomDataDir_hook + NEW_DATA_DIR="${OLD_DATA_DIR}" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + else + # check Environment JF_ROOT_DATA_DIR is 
set before migration + OLD_DATA_DIR="$(evalVariable "OLD_DATA_DIR" "JF_ROOT_DATA_DIR")" + # check Environment JF_ROOT_DATA_DIR is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_ROOT_DATA_DIR")" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi + +} + +getDataDir () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}"|| "${INSTALLER}" == "${HELM_TYPE}" ]]; then + checkEnv + else + getCustomDataDir_hook + NEW_DATA_DIR="`cd "${APP_DIR}"/../../;pwd`" + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi +} + +# Retrieve Product name from MIGRATION_SYSTEM_YAML_INFO +getProduct () { + retrieveYamlValue "migration.product" "${YAML_VALUE}" "Fail" "Empty value under ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + PRODUCT="${YAML_VALUE}" + PRODUCT=$(echo "${PRODUCT}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + if [[ "${PRODUCT}" != "artifactory" && "${PRODUCT}" != "distribution" && "${PRODUCT}" != "xray" ]]; then + errorExit "migration.product in [${MIGRATION_SYSTEM_YAML_INFO}] is not correct, please set based on product as ARTIFACTORY or DISTRIBUTION" + fi + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + JF_USER="${PRODUCT}" + fi +} +# Compare product version with minProductVersion and maxProductVersion +migrateCheckVersion () { + local productVersion="$1" + local minProductVersion="$2" + local maxProductVersion="$3" + local productVersion618="6.18.0" + local unSupportedProductVersions7=("7.2.0 7.2.1") + + if [[ "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 1 ]]; then + logger "Migration not necessary. ${PRODUCT} is already ${productVersion}" + exit 11 + elif [[ "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 1 ]]; then + if [[ ("$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 1) && " ${unSupportedProductVersions7[@]} " =~ " ${CURRENT_VERSION} " ]]; then + touch /tmp/error; + errorExit "Current ${PRODUCT} version (${productVersion}) does not support migration to ${CURRENT_VERSION}" + else + bannerStart "Detected ${PRODUCT} ${productVersion}, initiating migration" + fi + else + logger "Current ${PRODUCT} ${productVersion} version is not supported for migration" + exit 1 + fi +} + +getProductVersion () { + local minProductVersion="$1" + local maxProductVersion="$2" + local newfilePath="$3" + local oldfilePath="$4" + local propertyInDocker="$5" + local property="$6" + local productVersion= + local status= + + if [[ "$INSTALLER" == "${COMPOSE_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + elif [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${propertyInDocker}" "${newfilePath}")" + status="fail" + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." 
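+ # Nothing to read a version from, so exit cleanly rather than fail the installer.
+ # For reference, migrateCheckVersion above relies on io_compareVersions, which prints
+ # "1", "0" or "-1" for greater/equal/lower, e.g. (illustrative values only):
+ #   io_compareVersions "7.12.10" "6.18.0"   # prints "1"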
+ exit 0 + fi + elif [[ "$INSTALLER" == "${HELM_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + else + productVersion="${CURRENT_VERSION}" + [[ -z "${productVersion}" || "${productVersion}" == "" ]] && logger "${PRODUCT} CURRENT_VERSION is not set" && exit 0 + fi + else + if [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${property}" "${newfilePath}")" + status="fail" + elif [[ -f "${oldfilePath}" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + status="success" + else + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + logger "File [${newfilePath}] not found to get current version." + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." + fi + exit 0 + fi + fi + if [[ -z "${productVersion}" || "${productVersion}" == "" ]]; then + [[ "${status}" == "success" ]] && logger "No version found in file [${oldfilePath}]." + [[ "${status}" == "fail" ]] && logger "No version found in file [${newfilePath}]." + exit 0 + fi + + migrateCheckVersion "${productVersion}" "${minProductVersion}" "${maxProductVersion}" +} + +readKey () { + local property="$1" + local file="$2" + local version= + + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! -z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + version="${value}" && check=true && break + else + check=false + fi + done < "${file}" + if [[ "${check}" == "false" ]]; then + return + fi + echo "${version}" +} + +# create Log directory +createLogDir () { + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" + fi +} + +# Creating migration log file +creationMigrateLog () { + local LOG_FILE_NAME="migration.log" + createLogDir + local MIGRATION_LOG_FILE="${NEW_DATA_DIR}/log/${LOG_FILE_NAME}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + MIGRATION_LOG_FILE="${SCRIPT_HOME}/${LOG_FILE_NAME}" + fi + touch "${MIGRATION_LOG_FILE}" + setFilePermission "${LOG_FILE_PERMISSION}" "${MIGRATION_LOG_FILE}" + exec &> >(tee -a "${MIGRATION_LOG_FILE}") +} +# Set path where system.yaml should create +setSystemYamlPath () { + SYSTEM_YAML_PATH="${NEW_DATA_DIR}/etc/system.yaml" + if [[ "${INSTALLER}" != "${HELM_TYPE}" ]]; then + logger "system.yaml will be created in path [${SYSTEM_YAML_PATH}]" + fi +} +# Create directory +createDirectory () { + local directory="$1" + local output="$2" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${directory}" + mkdir -p "${directory}" && check=true || check=false + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi + setOwnershipBasedOnInstaller "${directory}" +} + +setOwnershipBasedOnInstaller () { + local directory="$1" + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + chown -R ${USER_TO_CHECK}:${GROUP_TO_CHECK} "${directory}" || warn "Setting ownership on $directory failed" + elif [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == 
"${HELM_TYPE}" ]]; then + io_setOwnership "${directory}" "${JF_USER}" "${JF_USER}" + fi +} + +getUserAndGroup () { + local file="$1" + read uid gid <<<$(stat -c '%U %G' ${file}) + USER_TO_CHECK="${uid}" + GROUP_TO_CHECK="${gid}" +} + +# set ownership +getUserAndGroupFromFile () { + case $PRODUCT in + artifactory) + getUserAndGroup "/etc/opt/jfrog/artifactory/artifactory.properties" + ;; + distribution) + getUserAndGroup "${OLD_DATA_DIR}/etc/versions.properties" + ;; + xray) + getUserAndGroup "${OLD_DATA_DIR}/security/master.key" + ;; + esac +} + +# creating required directories +createRequiredDirs () { + bannerSubSection "CREATING REQUIRED DIRECTORIES" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${JF_USER}" "${JF_USER}" "yes" + io_setOwnership "${NEW_DATA_DIR}" "${JF_USER}" "${JF_USER}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data/postgres" "${POSTGRES_USER}" "${POSTGRES_USER}" "yes" + fi + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + fi +} + +# Check entry in map is format +checkMapEntry () { + local entry="$1" + + [[ "${entry}" != *"="* ]] && echo -n "false" || echo -n "true" +} +# Check value Empty and warn +warnIfEmpty () { + local filePath="$1" + local yamlPath="$2" + local check= + + if [[ -z "${filePath}" ]]; then + warn "Empty value in yamlpath [${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + check=false + else + check=true + fi + echo "${check}" +} + +logCopyStatus () { + local status="$1" + local logMessage="$2" + local warnMessage="$3" + + [[ "${status}" == "success" ]] && logger "${logMessage}" + [[ "${status}" == "fail" ]] && warn "${warnMessage}" +} +# copy contents from source to destination +copyCmd () { + local source="$1" + local target="$2" + local mode="$3" + local status= + + case $mode in + unique) + cp -up "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + specific) + cp -pf "${source}" "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied file [${source}] to [${target}]" "Failed to copy file [${source}] to [${target}]" + ;; + patternFiles) + cp -pf "${source}"* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied files matching 
[${source}*] to [${target}]" "Failed to copy files matching [${source}*] to [${target}]" + ;; + full) + cp -prf "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + esac +} +# Check contents exist in source before copying +copyOnContentExist () { + local source="$1" + local target="$2" + local mode="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + copyCmd "${source}" "${target}" "${mode}" + else + logger "No contents to copy from [${source}]" + fi +} + +# move source to destination +moveCmd () { + local source="$1" + local target="$2" + local status= + + mv -f "${source}" "${target}" && status="success" || status="fail" + [[ "${status}" == "success" ]] && logger "Successfully moved directory [${source}] to [${target}]" + [[ "${status}" == "fail" ]] && warn "Failed to move directory [${source}] to [${target}]" +} + +# symlink target to source +symlinkCmd () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + local check=false + + if [[ "${symlinkSubDir}" == "subDir" ]]; then + ln -sf "${source}"/* "${target}" && check=true || check=false + else + ln -sf "${source}" "${target}" && check=true || check=false + fi + + [[ "${check}" == "true" ]] && logger "Successfully symlinked directory [${target}] to old [${source}]" + [[ "${check}" == "false" ]] && warn "Symlink operation failed" +} +# Check contents exist in source before symlinking +symlinkOnExist () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + if [[ "${symlinkSubDir}" == "subDir" ]]; then + symlinkCmd "${source}" "${target}" "subDir" + else + symlinkCmd "${source}" "${target}" + fi + else + logger "No contents to symlink from [${source}]" + fi +} + +prependDir () { + local absolutePath="$1" + local fullPath="$2" + local sourcePath= + + if [[ "${absolutePath}" = \/* ]]; then + sourcePath="${absolutePath}" + else + sourcePath="${fullPath}" + fi + echo "${sourcePath}" +} + +getFirstEntry (){ + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $1}' +} + +getSecondEntry () { + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $2}' +} +# To get absolutePath +pathResolver () { + local directoryPath="$1" + local dataDir= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Warning" + dataDir="${YAML_VALUE}" + cd "${dataDir}" + else + cd "${OLD_DATA_DIR}" + fi + absoluteDir="`cd "${directoryPath}";pwd`" + echo "${absoluteDir}" +} + +checkPathResolver () { + local value="$1" + + if [[ "${value}" == \/* ]]; then + value="${value}" + else + value="$(pathResolver "${value}")" + fi + echo "${value}" +} + +propertyMigrate () { + local entry="$1" + local filePath="$2" + local fileName="$3" + local check=false + + local yamlPath="$(getFirstEntry "${entry}")" + local property="$(getSecondEntry "${entry}")" + if [[ -z "${property}" ]]; then + warn "Property is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${property}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! 
-z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + value="$(migrateResolveDerbyPath "${key}" "${value}")" + value="$(migrateResolveHaDirPath "${key}" "${value}")" + value="$(updatePostgresUrlString_Hook "${yamlPath}" "${value}")" + fi + if [[ "${key}" == "context.url" ]]; then + local ip=$(echo "${value}" | awk -F/ '{print $3}' | sed 's/:.*//') + setSystemValue "shared.node.ip" "${ip}" "${SYSTEM_YAML_PATH}" + logger "Setting [shared.node.ip] with [${ip}] in system.yaml" + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" && logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" && check=true && break || check=false + fi + done < "${NEW_DATA_DIR}/${filePath}/${fileName}" + [[ "${check}" == "false" ]] && logger "Property [${property}] not found in file [${fileName}]" +} + +setHaEnabled_hook () { + echo "" +} + +migratePropertiesFiles () { + local fileList= + local filePath= + local fileName= + local map= + + retrieveYamlValue "migration.propertyFiles.files" "fileList" "Skip" + fileList="${YAML_VALUE}" + if [[ -z "${fileList}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF PROPERTY FILES" + for file in ${fileList}; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.propertyFiles.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.propertyFiles.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + if [[ "$(checkFileExists "${NEW_DATA_DIR}/${filePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + # setting haEnabled with true only if ha-node.properties is present + setHaEnabled_hook "${filePath}" + retrieveYamlValue "migration.propertyFiles.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + propertyMigrate "${entry}" "${filePath}" "${fileName}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=property" + fi + done + else + logger "File [${fileName}] was not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} + +createTargetDir () { + local mountDir="$1" + local target="$2" + + logger "Target directory not found [${mountDir}/${target}], creating it" + createDirectoryRecursive "${mountDir}" "${target}" "Warning" +} + +createDirectoryRecursive () { + local mountDir="$1" + local target="$2" + local output="$3" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${mountDir}/${target}" + local directory=$(echo "${target}" | tr '/' ' ' ) + local targetDir="${mountDir}" + for dir in ${directory}; + do + targetDir="${targetDir}/${dir}" + mkdir -p "${targetDir}" && check=true || check=false + setOwnershipBasedOnInstaller "${targetDir}" + done + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi +} + +copyOperation () { + local source="$1" + local target="$2" + local mode="$3" + local check=false + local targetDataDir= + local targetLink= + local date= + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir 
"${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + #remove source if it is a symlink + if [[ -L "${source}" ]]; then + targetLink=$(readlink -f "${source}") + logger "Removing the symlink [${source}] pointing to [${targetLink}]" + rm -f "${source}" + source=${targetLink} + fi + if [[ "$(checkDirExists "${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path" + return + fi + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + logger "No contents to copy from [${source}]" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyOnContentExist "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copySpecificFiles () { + local source="$1" + local target="$2" + local mode="$3" + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir "${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkFileExists "${source}")" != "true" ]]; then + logger "Source file [${source}] does not exist in path" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copyPatternMatchingFiles () { + local source="$1" + local target="$2" + local mode="$3" + local sourcePath="${4}" + + # prepend OLD_DATA_DIR only if source is relative path + sourcePath="$(prependDir "${sourcePath}" "${OLD_DATA_DIR}/${sourcePath}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkDirExists "${sourcePath}")" != "true" ]]; then + logger "Source [${sourcePath}] directory not found in path" + return + fi + if ls "${sourcePath}/${source}"* 1> /dev/null 2>&1; then + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${sourcePath}/${source}" "${targetDataDir}/${target}" "${mode}" + else + logger "Source file [${sourcePath}/${source}*] does not exist in path" + fi +} + +copyLogMessage () { + local mode="$1" + case $mode in + specific) + logger "Copy file [${source}] to target [${targetDataDir}/${target}]" + ;; + patternFiles) + logger "Copy files matching [${sourcePath}/${source}*] to target [${targetDataDir}/${target}]" + ;; + full) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + unique) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + esac +} + +copyBannerMessages () { + local mode="$1" + local textMode="$2" + case $mode in + specific) + bannerSection "COPY ${textMode} FILES" + ;; + patternFiles) + bannerSection "COPY MATCHING ${textMode}" + ;; + full) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + unique) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + esac +} + +invokeCopyFunctions () { + local mode="$1" + local source="$2" + local target="$3" + + case $mode in + specific) + copySpecificFiles "${source}" "${target}" "${mode}" + ;; + 
patternFiles) + retrieveYamlValue "migration.${copyFormat}.sourcePath" "map" "Warning" + local sourcePath="${YAML_VALUE}" + copyPatternMatchingFiles "${source}" "${target}" "${mode}" "${sourcePath}" + ;; + full) + copyOperation "${source}" "${target}" "${mode}" + ;; + unique) + copyOperation "${source}" "${target}" "${mode}" + ;; + esac +} +# Copies contents from source directory and target directory +copyDataDirectories () { + local copyFormat="$1" + local mode="$2" + local map= + local source= + local target= + local textMode= + local targetDataDir= + local copyFormatValue= + + retrieveYamlValue "migration.${copyFormat}" "${copyFormat}" "Skip" + copyFormatValue="${YAML_VALUE}" + if [[ -z "${copyFormatValue}" ]]; then + return + fi + textMode=$(echo "${mode}" | tr '[:lower:]' '[:upper:]' 2>/dev/null) + copyBannerMessages "${mode}" "${textMode}" + retrieveYamlValue "migration.${copyFormat}.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeCopyFunctions "${mode}" "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +invokeMoveFunctions () { + local source="$1" + local target="$2" + local sourceDataDir= + local targetBasename= + # prepend OLD_DATA_DIR only if source is relative path + sourceDataDir=$(prependDir "${source}" "${OLD_DATA_DIR}/${source}") + targetBasename=$(dirname "${target}") + logger "Moving directory source [${sourceDataDir}] to target [${NEW_DATA_DIR}/${target}]" + if [[ "$(checkDirExists "${sourceDataDir}")" != "true" ]]; then + logger "Directory [${sourceDataDir}] not found in path to move" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${targetBasename}")" != "true" ]]; then + createTargetDir "${NEW_DATA_DIR}" "${targetBasename}" + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/${target}" + else + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/tempDir" + moveCmd "${NEW_DATA_DIR}/tempDir" "${NEW_DATA_DIR}/${target}" + fi +} + +# Move source directory and target directory +moveDirectories () { + local moveDataDirectories= + local map= + local source= + local target= + + retrieveYamlValue "migration.moveDirectories" "moveDirectories" "Skip" + moveDirectories="${YAML_VALUE}" + if [[ -z "${moveDirectories}" ]]; then + return + fi + bannerSection "MOVE DIRECTORIES" + retrieveYamlValue "migration.moveDirectories.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeMoveFunctions "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e 
target=source" + fi + echo ""; + done +} + +# Trim masterKey if its generated using hex 32 +trimMasterKey () { + local masterKeyDir=/opt/jfrog/artifactory/var/etc/security + local oldMasterKey=$(<${masterKeyDir}/master.key) + local oldMasterKey_Length=$(echo ${#oldMasterKey}) + local newMasterKey= + if [[ ${oldMasterKey_Length} -gt 32 ]]; then + bannerSection "TRIM MASTERKEY" + newMasterKey=$(echo ${oldMasterKey:0:32}) + cp ${masterKeyDir}/master.key ${masterKeyDir}/backup_master.key + logger "Original masterKey is backed up : ${masterKeyDir}/backup_master.key" + rm -rf ${masterKeyDir}/master.key + echo ${newMasterKey} > ${masterKeyDir}/master.key + logger "masterKey is trimmed : ${masterKeyDir}/master.key" + fi +} + +copyDirectories () { + + copyDataDirectories "copyFiles" "full" + copyDataDirectories "copyUniqueFiles" "unique" + copyDataDirectories "copySpecificFiles" "specific" + copyDataDirectories "copyPatternMatchingFiles" "patternFiles" +} + +symlinkDir () { + local source="$1" + local target="$2" + local targetDir= + local basename= + local targetParentDir= + + targetDir="$(dirname "${target}")" + if [[ "${targetDir}" == "${source}" ]]; then + # symlink the sub directories + createDirectory "${NEW_DATA_DIR}/${target}" "Warning" + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" "subDir" + basename="$(basename "${target}")" + cd "${NEW_DATA_DIR}/${target}" && rm -f "${basename}" + fi + else + targetParentDir="$(dirname "${NEW_DATA_DIR}/${target}")" + createDirectory "${targetParentDir}" "Warning" + if [[ "$(checkDirExists "${targetParentDir}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" + fi + fi +} + +symlinkOperation () { + local source="$1" + local target="$2" + local check=false + local targetLink= + local date= + + # Check if source is a link and do symlink + if [[ -L "${OLD_DATA_DIR}/${source}" ]]; then + targetLink=$(readlink -f "${OLD_DATA_DIR}/${source}") + symlinkOnExist "${targetLink}" "${NEW_DATA_DIR}/${target}" + else + # check if source is directory and do symlink + if [[ "$(checkDirExists "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path to symlink" + return + fi + if [[ "$(checkDirContents "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "No contents found in [${OLD_DATA_DIR}/${source}] to symlink" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" != "true" ]]; then + logger "Target directory [${NEW_DATA_DIR}/${target}] does not exist to create symlink, creating it" + symlinkDir "${source}" "${target}" + else + rm -rf "${NEW_DATA_DIR}/${target}" && check=true || check=false + [[ "${check}" == "false" ]] && warn "Failed to remove contents in [${NEW_DATA_DIR}/${target}/]" + symlinkDir "${source}" "${target}" + fi + fi +} +# Creates a symlink path - Source directory to which the symbolic link should point. 
+symlinkDirectories () { + local linkFiles= + local map= + local source= + local target= + + retrieveYamlValue "migration.linkFiles" "linkFiles" "Skip" + linkFiles="${YAML_VALUE}" + if [[ -z "${linkFiles}" ]]; then + return + fi + bannerSection "SYMLINK DIRECTORIES" + retrieveYamlValue "migration.linkFiles.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + logger "Symlink directory [${NEW_DATA_DIR}/${target}] to old [${OLD_DATA_DIR}/${source}]" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + symlinkOperation "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +updateConnectionString () { + local yamlPath="$1" + local value="$2" + local mongoPath="shared.mongo.url" + local rabbitmqPath="shared.rabbitMq.url" + local postgresPath="shared.database.url" + local redisPath="shared.redis.connectionString" + local mongoConnectionString="mongo.connectionString" + local sourceKey= + local hostIp=$(io_getPublicHostIP) + local hostKey= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + # Replace @postgres:,@mongodb:,@rabbitmq:,@redis: to @{hostIp}: (Compose Installer) + hostKey="@${hostIp}:" + case $yamlPath in + ${postgresPath}) + sourceKey="@postgres:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoPath}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${rabbitmqPath}) + sourceKey="@rabbitmq:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${redisPath}) + sourceKey="@redis:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoConnectionString}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + esac + fi + echo -n "${value}" +} + +yamlMigrate () { + local entry="$1" + local sourceFile="$2" + local value= + local yamlPath= + local key= + yamlPath="$(getFirstEntry "${entry}")" + key="$(getSecondEntry "${entry}")" + if [[ -z "${key}" ]]; then + warn "key is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + getYamlValue "${key}" "${sourceFile}" "false" + value="${YAML_VALUE}" + if [[ ! 
-z "${value}" ]]; then + value=$(updateConnectionString "${yamlPath}" "${value}") + fi + if [[ "${PRODUCT}" == "artifactory" ]]; then + replicatorProfiling + fi + if [[ -z "${value}" ]]; then + logger "No value for [${key}] in [${sourceFile}]" + else + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the key [${key}] in system.yaml" + fi +} + +migrateYamlFile () { + local files= + local filePath= + local fileName= + local sourceFile= + local map= + retrieveYamlValue "migration.yaml.files" "files" "Skip" + files="${YAML_VALUE}" + if [[ -z "${files}" ]]; then + return + fi + bannerSection "MIGRATION OF YAML FILES" + for file in $files; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.yaml.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.yaml.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + sourceFile="${NEW_DATA_DIR}/${filePath}/${fileName}" + if [[ "$(checkFileExists "${sourceFile}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + retrieveYamlValue "migration.yaml.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + yamlMigrate "${entry}" "${sourceFile}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done + else + logger "File [${fileName}] is not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} +# updates the key and value in system.yaml +updateYamlKeyValue () { + local entry="$1" + local value= + local yamlPath= + local key= + + yamlPath="$(getFirstEntry "${entry}")" + value="$(getSecondEntry "${entry}")" + if [[ -z "${value}" ]]; then + warn "value is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value [${value}] in system.yaml" +} + +updateSystemYamlFile () { + local updateYaml= + local map= + + retrieveYamlValue "migration.updateSystemYaml" "updateYaml" "Skip" + updateSystemYaml="${YAML_VALUE}" + if [[ -z "${updateSystemYaml}" ]]; then + return + fi + bannerSection "UPDATE SYSTEM YAML FILE WITH KEY AND VALUES" + retrieveYamlValue "migration.updateSystemYaml.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ -z "${map}" ]]; then + return + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + updateYamlKeyValue "${entry}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done +} + +backupFiles_hook () { + logSilly "Method ${FUNCNAME[0]}" +} + +backupDirectory () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + 
effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyOnContentExist "${targetDir}" "${backupDirectory}/${dir}" "full" + fi +} + +removeOldDirectory () { + local backupDir="$1" + local entry="$2" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${entry}" "${OLD_DATA_DIR}/${entry}")" + local outputCheckDirExists="$(checkDirExists "${targetDir}")" + if [[ "${outputCheckDirExists}" != "true" ]]; then + logger "No [${targetDir}] directory found to delete" + echo ""; + return + fi + backupDirectory "${backupDir}" "${entry}" "${targetDir}" + rm -rf "${targetDir}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed directory [${targetDir}]" + [[ "${check}" == "false" ]] && warn "Failed to remove directory [${targetDir}]" + echo ""; +} + +cleanUpOldDataDirectories () { + local cleanUpOldDataDir= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldDataDir" "cleanUpOldDataDir" "Skip" + cleanUpOldDataDir="${YAML_VALUE}" + if [[ -z "${cleanUpOldDataDir}" ]]; then + return + fi + bannerSection "CLEAN UP OLD DATA DIRECTORIES" + retrieveYamlValue "migration.cleanUpOldDataDir.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old data configurations are backedup in [${backupDir}] directory ******" + backupFiles_hook "${backupDir}/${PRODUCT}" + for entry in $map; + do + removeOldDirectory "${backupDir}" "${entry}" + done +} + +backupFiles () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local fileName="$4" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyCmd "${targetDir}/${fileName}" "${backupDirectory}/${dir}" "specific" + fi +} + +removeOldFiles () { + local backupDir="$1" + local directoryName="$2" + local fileName="$3" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${directoryName}" "${OLD_DATA_DIR}/${directoryName}")" + local outputCheckFileExists="$(checkFileExists "${targetDir}/${fileName}")" + if [[ "${outputCheckFileExists}" != "true" ]]; then + logger "No [${targetDir}/${fileName}] 
file found to delete" + return + fi + backupFiles "${backupDir}" "${directoryName}" "${targetDir}" "${fileName}" + rm -f "${targetDir}/${fileName}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed file [${targetDir}/${fileName}]" + [[ "${check}" == "false" ]] && warn "Failed to remove file [${targetDir}/${fileName}]" + echo ""; +} + +cleanUpOldFiles () { + local cleanUpFiles= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldFiles" "cleanUpOldFiles" "Skip" + cleanUpOldFiles="${YAML_VALUE}" + if [[ -z "${cleanUpOldFiles}" ]]; then + return + fi + bannerSection "CLEAN UP OLD FILES" + retrieveYamlValue "migration.cleanUpOldFiles.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old files are backedup in [${backupDir}] directory ******" + for entry in $map; + do + local outputCheckMapEntry="$(checkMapEntry "${entry}")" + if [[ "${outputCheckMapEntry}" != "true" ]]; then + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e directoryName=fileName" + fi + local fileName="$(getSecondEntry "${entry}")" + local directoryName="$(getFirstEntry "${entry}")" + [[ -z "${fileName}" ]] && warn "File name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${directoryName}" ]] && warn "Directory name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + removeOldFiles "${backupDir}" "${directoryName}" "${fileName}" + echo ""; + done +} + +startMigration () { + bannerSection "STARTING MIGRATION" +} + +endMigration () { + bannerSection "MIGRATION COMPLETED SUCCESSFULLY" +} + +initialize () { + setAppDir + _pauseExecution "setAppDir" + initHelpers + _pauseExecution "initHelpers" + checkMigrationInfoYaml + _pauseExecution "checkMigrationInfoYaml" + getProduct + _pauseExecution "getProduct" + getDataDir + _pauseExecution "getDataDir" +} + +main () { + case $PRODUCT in + artifactory) + migrateArtifactory + ;; + distribution) + migrateDistribution + ;; + xray) + migrationXray + ;; + esac + exit 0 +} + +# Ensures meta data is logged +LOG_BEHAVIOR_ADD_META="$FLAG_Y" + + +migrateResolveDerbyPath () { + local key="$1" + local value="$2" + + if [[ "${key}" == "url" && "${value}" == *"db.home"* ]]; then + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + derbyPath="/opt/jfrog/artifactory/var/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + else + derbyPath="${NEW_DATA_DIR}/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + fi + fi + echo "${value}" +} + +migrateResolveHaDirPath () { + local key="$1" + local value="$2" + + if [[ "${INSTALLER}" == "${RPM_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" || "${INSTALLER}" == "${DEB_TYPE}" ]]; then + if [[ "${key}" == "artifactory.ha.data.dir" || "${key}" == "artifactory.ha.backup.dir" ]]; then + value=$(checkPathResolver "${value}") + fi + fi + echo "${value}" +} +updatePostgresUrlString_Hook () { + local yamlPath="$1" + local value="$2" + local hostIp=$(io_getPublicHostIP) + local sourceKey="//postgresql:" + if [[ "${yamlPath}" == "shared.database.url" ]]; then + value=$(io_replaceString "${value}" "${sourceKey}" "//${hostIp}:" "#") + fi + echo "${value}" +} +# Check Artifactory product version +checkArtifactoryVersion () { + local minProductVersion="6.0.0" + 
local maxProductVersion="7.0.0" + local propertyInDocker="ARTIFACTORY_VERSION" + local property="artifactory.version" + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + local newfilePath="${APP_DIR}/../.env" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + else + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="/etc/opt/jfrog/artifactory/artifactory.properties" + fi + + getProductVersion "${minProductVersion}" "${maxProductVersion}" "${newfilePath}" "${oldfilePath}" "${propertyInDocker}" "${property}" +} + +getCustomDataDir_hook () { + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Fail" + OLD_DATA_DIR="${YAML_VALUE}" +} + +# Get protocol value of connector +getXmlConnectorProtocol () { + local i="$1" + local filePath="$2" + local fileName="$3" + local protocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@protocol' ${filePath}/${fileName} 2>/dev/null |awk -F"=" '{print $2}' | tr -d '"') + echo -e "${protocolValue}" +} + +# Get all attributes of connector +getXmlConnectorAttributes () { + local i="$1" + local filePath="$2" + local fileName="$3" + local connectorAttributes=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@*' ${filePath}/${fileName} 2>/dev/null) + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + echo "${connectorAttributes}" +} + +# Get port value of connector +getXmlConnectorPort () { + local i="$1" + local filePath="$2" + local fileName="$3" + local portValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@port' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${portValue}" +} + +# Get maxThreads value of connector +getXmlConnectorMaxThreads () { + local i="$1" + local filePath="$2" + local fileName="$3" + local maxThreadValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@maxThreads' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${maxThreadValue}" +} +# Get sendReasonPhrase value of connector +getXmlConnectorSendReasonPhrase () { + local i="$1" + local filePath="$2" + local fileName="$3" + local sendReasonPhraseValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sendReasonPhrase' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${sendReasonPhraseValue}" +} +# Get relaxedPathChars value of connector +getXmlConnectorRelaxedPathChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedPathCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedPathChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + relaxedPathCharsValue=$(io_trim "${relaxedPathCharsValue}") + echo -e "${relaxedPathCharsValue}" +} +# Get relaxedQueryChars value of connector +getXmlConnectorRelaxedQueryChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedQueryCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedQueryChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + 
relaxedQueryCharsValue=$(io_trim "${relaxedQueryCharsValue}") + echo -e "${relaxedQueryCharsValue}" +} + +# Updating system.yaml with Connector port +setConnectorPort () { + local yamlPath="$1" + local valuePort="$2" + local portYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${valuePort}" ]]; then + warn "port value is empty, could not migrate to system.yaml" + return + fi + ## Getting port yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" portYamlPath "Warning" + portYamlPath="${YAML_VALUE}" + if [[ -z "${portYamlPath}" ]]; then + return + fi + setSystemValue "${portYamlPath}" "${valuePort}" "${SYSTEM_YAML_PATH}" + logger "Setting [${portYamlPath}] with value [${valuePort}] in system.yaml" +} + +# Updating system.yaml with Connector maxThreads +setConnectorMaxThread () { + local yamlPath="$1" + local threadValue="$2" + local maxThreadYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${threadValue}" ]]; then + return + fi + ## Getting max Threads yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" maxThreadYamlPath "Warning" + maxThreadYamlPath="${YAML_VALUE}" + if [[ -z "${maxThreadYamlPath}" ]]; then + return + fi + setSystemValue "${maxThreadYamlPath}" "${threadValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${maxThreadYamlPath}] with value [${threadValue}] in system.yaml" +} + +# Updating system.yaml with Connector sendReasonPhrase +setConnectorSendReasonPhrase () { + local yamlPath="$1" + local sendReasonPhraseValue="$2" + local sendReasonPhraseYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${sendReasonPhraseValue}" ]]; then + return + fi + ## Getting sendReasonPhrase yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" sendReasonPhraseYamlPath "Warning" + sendReasonPhraseYamlPath="${YAML_VALUE}" + if [[ -z "${sendReasonPhraseYamlPath}" ]]; then + return + fi + setSystemValue "${sendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${sendReasonPhraseYamlPath}] with value [${sendReasonPhraseValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedPathChars +setConnectorRelaxedPathChars () { + local yamlPath="$1" + local relaxedPathCharsValue="$2" + local relaxedPathCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedPathCharsValue}" ]]; then + return + fi + ## Getting relaxedPathChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedPathCharsYamlPath "Warning" + relaxedPathCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedPathCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedPathCharsYamlPath}" "${relaxedPathCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedPathCharsYamlPath}] with value [${relaxedPathCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedQueryChars +setConnectorRelaxedQueryChars () { + local yamlPath="$1" + local relaxedQueryCharsValue="$2" + local relaxedQueryCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedQueryCharsValue}" ]]; then + return + fi + ## Getting relaxedQueryChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedQueryCharsYamlPath "Warning" + relaxedQueryCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedQueryCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedQueryCharsYamlPath}" "${relaxedQueryCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedQueryCharsYamlPath}] with value 
[${relaxedQueryCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connectors configurations +setConnectorExtraConfig () { + local yamlPath="$1" + local connectorAttributes="$2" + local extraConfigPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${connectorAttributes}" ]]; then + return + fi + ## Getting extraConfig yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConfig "Warning" + extraConfigPath="${YAML_VALUE}" + if [[ -z "${extraConfigPath}" ]]; then + return + fi + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setSystemValue "${extraConfigPath}" "${connectorAttributes}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConfigPath}] with connector attributes in system.yaml" +} + +# Updating system.yaml with extra Connectors +setExtraConnector () { + local yamlPath="$1" + local extraConnector="$2" + local extraConnectorYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${extraConnector}" ]]; then + return + fi + ## Getting extraConnecotr yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConnectorYamlPath "Warning" + extraConnectorYamlPath="${YAML_VALUE}" + if [[ -z "${extraConnectorYamlPath}" ]]; then + return + fi + getYamlValue "${extraConnectorYamlPath}" "${SYSTEM_YAML_PATH}" "false" + local connectorExtra="${YAML_VALUE}" + if [[ -z "${connectorExtra}" ]]; then + setSystemValue "${extraConnectorYamlPath}" "${extraConnector}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + else + setSystemValue "${extraConnectorYamlPath}" "\"${connectorExtra} ${extraConnector}\"" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + fi +} + +# Migrate extra connectors to system.yaml +migrateExtraConnectors () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local excludeDefaultPort="$4" + local i="$5" + local extraConfig= + local extraConnector= + if [[ "${excludeDefaultPort}" == "yes" ]]; then + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" && "${portValue}" != "${DEFAULT_RT_PORT}" ]] || continue + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + done + else + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + fi +} + +# Migrate connector configurations +migrateConnectorConfig () { + local i="$1" + local protocolType="$2" + local portValue="$3" + local connectorPortYamlPath="$4" + local connectorMaxThreadYamlPath="$5" + local connectorAttributesYamlPath="$6" + local filePath="$7" + local fileName="$8" + local connectorSendReasonPhraseYamlPath="$9" + local connectorRelaxedPathCharsYamlPath="${10}" + local connectorRelaxedQueryCharsYamlPath="${11}" + + # migrate port + setConnectorPort "${connectorPortYamlPath}" "${portValue}" + + # migrate maxThreads + local maxThreadValue=$(getXmlConnectorMaxThreads "$i" "${filePath}" "${fileName}") + setConnectorMaxThread "${connectorMaxThreadYamlPath}" "${maxThreadValue}" + + # migrate sendReasonPhrase + local sendReasonPhraseValue=$(getXmlConnectorSendReasonPhrase "$i" "${filePath}" "${fileName}") + 
setConnectorSendReasonPhrase "${connectorSendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" + + # migrate relaxedPathChars + local relaxedPathCharsValue=$(getXmlConnectorRelaxedPathChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedPathChars "${connectorRelaxedPathCharsYamlPath}" "\"${relaxedPathCharsValue}\"" + # migrate relaxedQueryChars + local relaxedQueryCharsValue=$(getXmlConnectorRelaxedQueryChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedQueryChars "${connectorRelaxedQueryCharsYamlPath}" "\"${relaxedQueryCharsValue}\"" + + # migrate all attributes to extra config except port , maxThread , sendReasonPhrase ,relaxedPathChars and relaxedQueryChars + local connectorAttributes=$(getXmlConnectorAttributes "$i" "${filePath}" "${fileName}") + connectorAttributes=$(echo "${connectorAttributes}" | sed 's/port="'${portValue}'"//g' | sed 's/maxThreads="'${maxThreadValue}'"//g' | sed 's/sendReasonPhrase="'${sendReasonPhraseValue}'"//g' | sed 's/relaxedPathChars="\'${relaxedPathCharsValue}'\"//g' | sed 's/relaxedQueryChars="\'${relaxedQueryCharsValue}'\"//g') + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setConnectorExtraConfig "${connectorAttributesYamlPath}" "${connectorAttributes}" +} + +# Check for default port 8040 and 8081 in connectors and migrate +migrateConnectorPort () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + local connectorPortYamlPath="$5" + local connectorMaxThreadYamlPath="$6" + local connectorAttributesYamlPath="$7" + local connectorSendReasonPhraseYamlPath="$8" + local connectorRelaxedPathCharsYamlPath="$9" + local connectorRelaxedQueryCharsYamlPath="${10}" + local portYamlPath= + local maxThreadYamlPath= + local status= + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" == *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + RT_DEFAULTPORT_STATUS=success + else + AC_DEFAULTPORT_STATUS=success + fi + migrateConnectorConfig "${i}" "${protocolType}" "${portValue}" "${connectorPortYamlPath}" "${connectorMaxThreadYamlPath}" "${connectorAttributesYamlPath}" "${filePath}" "${fileName}" "${connectorSendReasonPhraseYamlPath}" "${connectorRelaxedPathCharsYamlPath}" "${connectorRelaxedQueryCharsYamlPath}" + done +} + +# migrate to extra, connector having default port and protocol is AJP +migrateDefaultPortIfAjp () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + done + +} + +# Comparing max threads in connectors +compareMaxThreads () { + local firstConnectorMaxThread="$1" + local firstConnectorNode="$2" + local secondConnectorMaxThread="$3" + local secondConnectorNode="$4" + local filePath="$5" + local fileName="$6" + + # choose higher maxThreads connector as Artifactory. 
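+ # Whichever connector has the larger maxThreads value is migrated as the Artifactory connector and the other as the Access connector; if both values are equal, the first connector is treated as Artifactory.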
+ if [[ "${firstConnectorMaxThread}" -gt ${secondConnectorMaxThread} || "${firstConnectorMaxThread}" -eq ${secondConnectorMaxThread} ]]; then + # maxThread is higher in firstConnector, + # Taking firstConnector as Artifactory and SecondConnector as Access + # maxThread is equal in both connector,considering firstConnector as Artifactory and SecondConnector as Access + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + else + # maxThread is higher in SecondConnector, + # Taking SecondConnector as Artifactory and firstConnector as Access + local rtPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +# Check max threads exist to compare +maxThreadsExistToCompare () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local firstConnectorMaxThread= + local secondConnectorMaxThread= + local firstConnectorNode= + local secondConnectorNode= + local status=success + local firstnode=fail + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ ${protocolType} == *AJP* ]]; then + # Migrate Connectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + fi + # store maxthreads value of each connector + if [[ ${firstnode} == "fail" ]]; then + firstConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + firstConnectorNode="${i}" + firstnode=success + else + secondConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + secondConnectorNode="${i}" + fi + done + [[ -z "${firstConnectorMaxThread}" ]] && status=fail + [[ -z "${secondConnectorMaxThread}" ]] && status=fail + # maxThreads is set, now compare MaxThreads + if [[ "${status}" == "success" ]]; then + compareMaxThreads "${firstConnectorMaxThread}" "${firstConnectorNode}" "${secondConnectorMaxThread}" "${secondConnectorNode}" "${filePath}" "${fileName}" + else + # Assume first connector is RT, maxThreads is not set in both connectors + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" 
"${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +migrateExtraBasedOnNonAjpCount () { + local nonAjpCount="$1" + local filePath="$2" + local fileName="$3" + local connectorCount="$4" + local i="$5" + + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ "${protocolType}" == *AJP* ]]; then + if [[ "${nonAjpCount}" -eq 1 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + else + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + continue + fi + fi +} + +# find RT and AC Connector +findRtAndAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local initialAjpCount=0 + local nonAjpCount=0 + + # get the count of non AJP + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] || continue + nonAjpCount=$((initialAjpCount+1)) + initialAjpCount="${nonAjpCount}" + done + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access and artifactory connectors + # Mark port as 8040 for access + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + done + elif [[ "${nonAjpCount}" -eq 2 ]]; then + # compare maxThreads in both connectors + maxThreadsExistToCompare "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${nonAjpCount}" -gt 2 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # setting with default port in system.yaml + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# get the count of non AJP +getCountOfNonAjp () { + local port="$1" + local connectorCount="$2" + local filePath=$3 + local fileName=$4 + local initialNonAjpCount=0 + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" 
"${filePath}" "${fileName}") + [[ "${portValue}" != "${port}" ]] || continue + [[ "${protocolType}" != *AJP* ]] || continue + local nonAjpCount=$((initialNonAjpCount+1)) + initialNonAjpCount="${nonAjpCount}" + done + echo -e "${nonAjpCount}" +} + +# Find for access connector +findAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_RT_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access connector and mark port as that of connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take RT properties into access with 8040 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add RT connector details as access connector and mark port as 8040 + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# Find for artifactory connector +findRtConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_ACCESS_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as RT connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take access properties into artifactory with 8081 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + 
if [[ "${portValue}" == "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add access connector details as RT connector and mark as ${DEFAULT_RT_PORT} + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +checkForTlsConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + local sslProtocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sslProtocol' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${sslProtocolValue}" == "TLS" ]]; then + bannerImportant "NOTE: Ignoring TLS connector during migration, modify the system yaml to enable TLS. Original server.xml is saved in path [${filePath}/${fileName}]" + TLS_CONNECTOR_EXISTS=${FLAG_Y} + continue + fi + done +} + +# set custom tomcat server Listeners to system.yaml +setListenerConnector () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + for ((i = 1 ; i <= "${listenerCount}" ; i++)) + do + local listenerConnector=$($LIBXML2_PATH --xpath '//Server/Listener['$i']' ${filePath}/${fileName} 2>/dev/null) + local listenerClassName=$($LIBXML2_PATH --xpath '//Server/Listener['$i']/@className' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${listenerClassName}" == *Apr* ]]; then + setExtraConnector "${EXTRA_LISTENER_CONFIG_YAMLPATH}" "${listenerConnector}" + fi + done +} +# add custom tomcat server Listeners +addTomcatServerListeners () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + if [[ "${listenerCount}" == "0" ]]; then + logger "No listener connectors found in the [${filePath}/${fileName}],skipping migration of listener connectors" + else + setListenerConnector "${filePath}" "${fileName}" "${listenerCount}" + setSystemValue "${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}" "true" "${SYSTEM_YAML_PATH}" + logger "Setting [${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}] with value [true] in system.yaml" + fi +} + +# server.xml migration operations +xmlMigrateOperation () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local listenerCount="$4" + RT_DEFAULTPORT_STATUS=fail + AC_DEFAULTPORT_STATUS=fail + TLS_CONNECTOR_EXISTS=${FLAG_N} + + # Check for connector with TLS , if found ignore migrating it + checkForTlsConnector "${filePath}" "${fileName}" "${connectorCount}" + if [[ "${TLS_CONNECTOR_EXISTS}" == "${FLAG_Y}" ]]; then + return + fi + addTomcatServerListeners "${filePath}" "${fileName}" "${listenerCount}" + # Migrate RT default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" 
"${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + # Migrate to extra if RT default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" + # Migrate AC default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + # Migrate to extra if access default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" + + if [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # RT and AC default port found + logger "Artifactory 8081 and Access 8040 default port are found" + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # Only AC default port found,find RT connector + logger "Found Access default 8040 port" + findRtConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # Only RT default port found,find AC connector + logger "Found Artifactory default 8081 port" + findAcConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # RT and AC default port not found, find connector + logger "Artifactory 8081 and Access 8040 default port are not found" + findRtAndAcConnector "${filePath}" "${fileName}" "${connectorCount}" + fi +} + +# get count of connectors +getXmlConnectorCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Service/Connector)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# get count of listener connectors +getTomcatServerListenersCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Listener)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# Migrate server.xml configuration to system.yaml +migrateXmlFile () { + local xmlFiles= + local fileName= + local filePath= + local sourceFilePath= + DEFAULT_ACCESS_PORT="8040" + DEFAULT_RT_PORT="8081" + AC_PORT_YAMLPATH="migration.xmlFiles.serverXml.access.port" + AC_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.access.maxThreads" + AC_SENDREASONPHRASE_YAMLPATH="migration.xmlFiles.serverXml.access.sendReasonPhrase" + AC_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.access.extraConfig" + RT_PORT_YAMLPATH="migration.xmlFiles.serverXml.artifactory.port" + RT_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.artifactory.maxThreads" + RT_SENDREASONPHRASE_YAMLPATH='migration.xmlFiles.serverXml.artifactory.sendReasonPhrase' + RT_RELAXEDPATHCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedPathChars' + RT_RELAXEDQUERYCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedQueryChars' + RT_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.artifactory.extraConfig" + ROUTER_PORT_YAMLPATH="migration.xmlFiles.serverXml.router.port" + EXTRA_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.config" + EXTRA_LISTENER_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.listener" + RT_TOMCAT_HTTPSCONNECTOR_ENABLED="artifactory.tomcat.httpsConnector.enabled" + + retrieveYamlValue "migration.xmlFiles" "xmlFiles" "Skip" + xmlFiles="${YAML_VALUE}" 
+ if [[ -z "${xmlFiles}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF XML FILES" + retrieveYamlValue "migration.xmlFiles.serverXml.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + if [[ -z "${fileName}" ]]; then + return + fi + bannerSubSection "Processing Migration of $fileName" + retrieveYamlValue "migration.xmlFiles.serverXml.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + if [[ -z "${filePath}" ]]; then + return + fi + # prepend NEW_DATA_DIR only if filePath is relative path + sourceFilePath=$(prependDir "${filePath}" "${NEW_DATA_DIR}/${filePath}") + if [[ "$(checkFileExists "${sourceFilePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] is found in path [${sourceFilePath}]" + local connectorCount=$(getXmlConnectorCount "${sourceFilePath}" "${fileName}") + if [[ "${connectorCount}" == "0" ]]; then + logger "No connectors found in the [${filePath}/${fileName}],skipping migration of xml configuration" + return + fi + local listenerCount=$(getTomcatServerListenersCount "${sourceFilePath}" "${fileName}") + xmlMigrateOperation "${sourceFilePath}" "${fileName}" "${connectorCount}" "${listenerCount}" + else + logger "File [${fileName}] is not found in path [${sourceFilePath}] to migrate" + fi +} + +compareArtifactoryUser () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + + if [[ "${oldPropertyValue}" != "${newPropertyValue}" ]]; then + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" + else + logger "No change in property [${property}] value in [${sourceFile}] to migrate" + fi +} + +migrateReplicator () { + local property="$1" + local oldPropertyValue="$2" + local yamlPath="$3" + + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" +} + +compareJavaOptions () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + local oldJavaOption= + local newJavaOption= + local extraJavaOption= + local check=false + local success=true + local status=true + + oldJavaOption=$(echo "${oldPropertyValue}" | awk 'BEGIN{FS=OFS="\""}{for(i=2;i> /etc/hosts" + +3. Launch jconsole: +jconsole {{ template "artifactory.fullname" . }}:{{ .Values.artifactory.javaOpts.jmx.port }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/_helpers.tpl b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/_helpers.tpl new file mode 100644 index 000000000..d683ca231 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/_helpers.tpl @@ -0,0 +1,85 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "artifactory.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name nginx service. +*/}} +{{- define "artifactory.nginx.name" -}} +{{- default .Chart.Name .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "artifactory.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified replicator app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory.replicator.fullname" -}} +{{- if .Values.artifactory.replicator.ingress.name -}} +{{- .Values.artifactory.replicator.ingress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified nginx name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "artifactory.nginx.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nginx.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory.name" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Scheme (http/https) based on Access TLS enabled/disabled +*/}} +{{- define "artifactory.scheme" -}} +{{- if .Values.access.accessConfig.security.tls -}} +{{- printf "%s" "https" -}} +{{- else -}} +{{- printf "%s" "http" -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/admin-bootstrap-creds.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/admin-bootstrap-creds.yaml new file mode 100644 index 000000000..c8bf2eb16 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/admin-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} +{{- if .Values.artifactory.admin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "%s@%s=%s" .Values.artifactory.admin.username .Values.artifactory.admin.ip .Values.artifactory.admin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-access-config.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-access-config.yaml new file mode 100644 index 000000000..700e65608 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-access-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.access.accessConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-access-config + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + access.config.import.yml: | +{{ tpl (toYaml .Values.access.accessConfig) . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-binarystore-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-binarystore-secret.yaml new file mode 100644 index 000000000..bbfbdf3be --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-binarystore + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-configmaps.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-configmaps.yaml new file mode 100644 index 000000000..359fa07d2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-configmaps + labels: + app: {{ template "artifactory.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . 
| indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-custom-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-custom-secrets.yaml new file mode 100644 index 000000000..ab2e8324c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.artifactory.customSecrets }} +{{- range .Values.artifactory.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: + app: "{{ template "artifactory.name" $ }}" + chart: "{{ template "artifactory.chart" $ }}" + component: "{{ $.Values.artifactory.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-database-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-database-secrets.yaml new file mode 100644 index 000000000..9edf87dc1 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-database-secrets.yaml @@ -0,0 +1,22 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-database-creds + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-installer-info.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-installer-info.yaml new file mode 100644 index 000000000..f2e2c0f5b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-installer-info.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-installer-info + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + {{ tpl .Values.installerInfo . 
}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-license-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-license-secret.yaml new file mode 100644 index 000000000..d37ed1e4b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" $ }}-license + labels: + app: {{ template "artifactory.name" $ }} + chart: {{ template "artifactory.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-migration-scripts.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-migration-scripts.yaml new file mode 100644 index 000000000..edca83ed3 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-migration-scripts.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-migration-scripts + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + migrate.sh: | +{{ .Files.Get "files/migrate.sh" | indent 4 }} + migrationHelmInfo.yaml: | +{{ .Files.Get "files/migrationHelmInfo.yaml" | indent 4 }} + migrationStatus.sh: | +{{ .Files.Get "files/migrationStatus.sh" | indent 4 }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-networkpolicy.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-networkpolicy.yaml new file mode 100644 index 000000000..da6fd178a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory.name" $ }} + chart: {{ template "artifactory.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-priority-class.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-priority-class.yaml new file mode 100644 index 000000000..9eb94b35b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-priority-class.yaml @@ -0,0 +1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} 
+apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-role.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-role.yaml new file mode 100644 index 000000000..db01f235f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-rolebinding.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-rolebinding.yaml new file mode 100644 index 000000000..dfb42258f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory.fullname" . }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-secrets.yaml new file mode 100644 index 000000000..a1d772429 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if and .Values.artifactory.masterKey (not .Values.artifactory.masterKeySecretName) }} + master-key: {{ .Values.artifactory.masterKey | b64enc | quote }} + {{- end }} + {{- if and .Values.artifactory.joinKey (not .Values.artifactory.joinKeySecretName) }} + join-key: {{ .Values.artifactory.joinKey | b64enc | quote }} + {{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-service.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-service.yaml new file mode 100644 index 000000000..f1f2be4a9 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-service.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} +{{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: artifactory + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: ssh + {{- end }} + {{- with .Values.artifactory.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: jmx + {{- end }} + {{- end }} + selector: + app: {{ template "artifactory.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-serviceaccount.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-serviceaccount.yaml new file mode 100644 index 000000000..4e9c5fbb5 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.serviceAccountName" . 
}} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-statefulset.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-statefulset.yaml new file mode 100644 index 000000000..3680caf0e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-statefulset.yaml @@ -0,0 +1,695 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory/CHANGELOG.md), pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true if you are upgrading from chart version which has postgresql version 9.6.x." .Values.databaseUpgradeReady | quote }} +{{- end }} +spec: + serviceName: {{ template "artifactory.name" . }} + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory.name" . }} + role: {{ template "artifactory.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + role: {{ template "artifactory.name" . }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.access.accessConfig }} + checksum/access-config: {{ include (print $.Template.BasePath "/artifactory-access-config.yaml") . | sha256sum }} + {{- end }} + {{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} + checksum/admin-creds: {{ include (print $.Template.BasePath "/admin-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory.serviceAccountName" . 
}} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.uid }} + initContainers: + {{- if .Values.artifactory.persistence.enabled }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found {{ .Values.artifactory.persistence.mountPath }}/data/.lock' + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + command: + - 'sh' + - '-c' + - > + echo "Preparing {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + subPath: {{ .Values.artifactory.admin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + command: + - '/bin/sh' + - '-c' + - > + sleep 30; + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + {{- if .Values.access.accessConfig }} + echo "Copy access.config.latest.yml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -fv /tmp/etc/access.config.import.yml {{ .Values.artifactory.persistence.mountPath }}/etc/access/access.config.import.yml; + {{- end }} + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + touch {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/reset_ca_keys; + {{- end }} + {{- if 
.Values.access.customCertificatesSecretName }} + echo "Copying custom certificates to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + cp -fv /tmp/etc/tls.crt {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.crt; + cp -fv /tmp/etc/tls.key {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.private.key; + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.artifactory.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security; + echo -n ${ARTIFACTORY_JOIN_KEY} > {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security/join.key; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.artifactory.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + {{- end }} + env: + {{- if or .Values.artifactory.joinKey .Values.artifactory.joinKeySecretName}} + - name: ARTIFACTORY_JOIN_KEY + valueFrom: + secretKeyRef: + name: "{{ .Values.artifactory.joinKeySecretName | default (include "artifactory.fullname" .) }}" + key: join-key + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.artifactory.masterKeySecretName }} + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: "{{ .Values.artifactory.masterKeySecretName | default (include "artifactory.fullname" .) }}" + key: master-key + {{- end }} + volumeMounts: + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + - name: systemyaml + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- if .Values.access.accessConfig }} + - name: access-config + mountPath: "/tmp/etc/access.config.import.yml" + subPath: access.config.import.yml + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + mountPath: "/tmp/etc/tls.crt" + subPath: tls.crt + - name: access-certs + mountPath: "/tmp/etc/tls.key" + subPath: tls.key + {{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + echo "Setting ownership {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} on PVC {{ .Values.artifactory.customPersistentPodVolumeClaim.name }}" + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.uid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if or .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if .Values.artifactory.customInitContainers }} +{{ tpl .Values.artifactory.customInitContainers . 
| indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory' + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ default .Chart.AppVersion .Values.artifactory.image.version }} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) 
$ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: '{{ .Values.artifactory.image.repository }}:{{ default .Chart.AppVersion .Values.artifactory.image.version }}' + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + if [ -d /artifactory_extra_conf ] && [ -d /artifactory_bootstrap ]; then + echo "Copying bootstrap config from /artifactory_extra_conf to /artifactory_bootstrap"; + cp -Lrfv /artifactory_extra_conf/ /artifactory_bootstrap/; + fi; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_bootstrap/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /artifactory_bootstrap/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + exec /entrypoint-artifactory.sh + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo; + {{- with .Values.artifactory.postStartCommand }} + {{ tpl . $ }} + {{- end }} + env: + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory.fullname" . 
}}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + {{- if .Values.artifactory.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.javaOpts.jmx.port }} + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + {{- end }} + volumeMounts: + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + mountPath: "/artifactory_bootstrap/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . $ }}" + {{- end }} + {{- end }} + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.lic" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if .Values.artifactory.customVolumeMounts }} +{{ tpl .Values.artifactory.customVolumeMounts . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory.scheme" . 
| upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: artifactory-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: artifactory-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if .Values.artifactory.customSidecarContainers }} +{{ tpl .Values.artifactory.customSidecarContainers . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.artifactory.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.artifactory.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-binarystore + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory.fullname" . 
}}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . $ }} + {{- end }} + {{- end }} + {{- if and .Values.artifactory.persistence.enabled .Values.artifactory.persistence.existingClaim }} + - name: artifactory-volume + persistentVolumeClaim: + claimName: {{ .Values.artifactory.persistence.existingClaim }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory.fullname" . }}-configmaps + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory.fullname" . }}-license + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + secretName: {{ .Values.artifactory.admin.secret }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + persistentVolumeClaim: + claimName: {{ template "artifactory.fullname" . }}-data-pvc + - name: artifactory-backup + persistentVolumeClaim: + claimName: {{ template "artifactory.fullname" . }}-backup-pvc + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: artifactory-volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + - name: systemyaml + secret: + secretName: {{ template "artifactory.fullname" . }}-systemyaml + {{- if .Values.access.accessConfig }} + - name: access-config + secret: + secretName: {{ template "artifactory.fullname" . }}-access-config + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + secret: + secretName: {{ .Values.access.customCertificatesSecretName }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory.name" . }}-filebeat-config + {{- end }} + {{- if .Values.artifactory.customVolumes }} +{{ tpl .Values.artifactory.customVolumes . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} +{{- with .Values.artifactory.persistence }} + {{- if and .enabled (not .existingClaim) }} + volumeClaimTemplates: + - metadata: + name: artifactory-volume + spec: + {{- if .storageClassName }} + {{- if (eq "-" .storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .accessMode }}" ] + resources: + requests: + storage: {{ .size }} + {{- end }} +{{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-system-yaml.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-system-yaml.yaml new file mode 100644 index 000000000..e2fa58e86 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/artifactory-system-yaml.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-systemyaml + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/filebeat-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..8ceb67c01 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.name" . }}-filebeat-config + labels: + app: {{ template "artifactory.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/ingress.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/ingress.yaml new file mode 100644 index 000000000..47e2f5505 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/ingress.yaml @@ -0,0 +1,103 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory.fullname" . 
-}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- $ingressName := default ( include "artifactory.fullname" . ) .Values.ingress.name -}} +{{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $ingressName }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- if .Values.artifactory.replicator.enabled }} +--- +{{- $replicatorIngressName := default ( include "artifactory.replicator.fullname" . ) .Values.artifactory.replicator.ingress.name -}} + {{- if semverCompare ">=v1.14.0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 + {{- else }} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ $replicatorIngressName }} + labels: + app: "{{ template "artifactory.name" $ }}" + chart: "{{ template "artifactory.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.ingress.annotations }} + annotations: +{{ .Values.artifactory.replicator.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.ingress.hosts }} + {{- range $host := .Values.artifactory.replicator.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: /replicator/ + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: /artifactory/api/replication/replicate/file/streaming + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.ingress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/logger-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/logger-configmap.yaml new file mode 100644 index 000000000..f53f35e48 --- /dev/null +++ 
b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-logger + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 1 + done + +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-artifactory-conf.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-artifactory-conf.yaml new file mode 100644 index 000000000..bd2ebea96 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-certificate-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-certificate-secret.yaml new file mode 100644 index 000000000..fa39459e7 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory.gen-certs" . 
) | indent 2 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-conf.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-conf.yaml new file mode 100644 index 000000000..851eae247 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-deployment.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-deployment.yaml new file mode 100644 index 000000000..ec0307769 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-deployment.yaml @@ -0,0 +1,194 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: {{ .Values.nginx.kind }} +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: +{{- if eq .Values.nginx.kind "StatefulSet" }} + serviceName: {{ template "artifactory.nginx.fullname" . }} +{{- end }} +{{- if ne .Values.nginx.kind "DaemonSet" }} + replicas: {{ .Values.nginx.replicaCount }} +{{- end }} + selector: + matchLabels: + app: {{ template "artifactory.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory.serviceAccountName" . 
}} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: '{{ .Values.nginx.image.repository }}:{{ default .Chart.AppVersion .Values.nginx.image.version }}' + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.nginx.ssh.internalPort }} + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $image := .Values.logger.image.repository }} + {{- $tag := .Values.logger.image.tag }} + {{- $mountPath := .Values.nginx.persistence.mountPath }} + {{- range 
.Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: '{{ $image }}:{{ $tag }}' + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + resources: +{{ toYaml $.Values.nginx.loggersResources | indent 10 }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory.fullname" . }}-nginx-artifactory-conf + {{- end }} + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-nginx-certificate + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-pvc.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-pvc.yaml new file mode 100644 index 000000000..a110d8b24 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled ) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClass }} +{{- if (eq "-" .Values.nginx.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClass }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-service.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-service.yaml new file mode 100644 index 000000000..78d2a6f29 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/templates/nginx-service.yaml @@ -0,0 +1,73 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . 
}} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if or .Values.nginx.https.enabled .Values.nginx.service.ssloffload }} + - port: {{ .Values.nginx.https.externalPort }} + {{- if .Values.nginx.service.ssloffload }} + targetPort: {{ .Values.nginx.http.internalPort }} + {{- else }} + targetPort: {{ .Values.nginx.https.internalPort}} + {{- end }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.nginx.ssh.externalPort }} + targetPort: {{ .Values.nginx.ssh.internalPort }} + protocol: TCP + name: ssh + {{- end }} + selector: + app: {{ template "artifactory.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-large.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-large.yaml new file mode 100644 index 000000000..6399c5b8a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-large.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-medium.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-medium.yaml new file mode 100644 index 000000000..44aff82de --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-medium.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-small.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-small.yaml new file mode 100644 index 000000000..04368ae15 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values-small.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values.yaml new file mode 100644 index 000000000..9eee1703c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/charts/artifactory/values.yaml @@ -0,0 +1,1268 @@ +# Default values for artifactory. +# This is a YAML-formatted file. + +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# Common +initContainerImage: docker.bintray.io/alpine:3.12 + +installer: + type: + platform: + +installerInfo: '{"productId": "Helm_artifactory/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + +# For supporting pulling from private registries +imagePullSecrets: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + ## Service Account annotations + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. 
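(Aside: the ingress block that this values.yaml is opening is disabled by default. A minimal override that turns it on could look like the sketch below; the host name and TLS secret name are the same placeholders used in the chart's own comments, not real endpoints.)

ingress:
  enabled: true
  hosts:
    - artifactory.domain.example
  annotations:
    kubernetes.io/ingress.class: nginx
  tls:
    - secretName: chart-example-tls
      hosts:
        - artifactory.domain.example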
+ hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/configuration-snippet: | + # proxy_pass_header Server; + # proxy_set_header X-JFrog-Override-Base-Url https://; + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory + +logger: + image: + repository: docker.bintray.io/busybox + tag: 1.31.1 + +# Artifactory +artifactory: + name: artifactory + # Note that by default we use appVersion to get image tag/version + image: + repository: docker.bintray.io/jfrog/artifactory-pro + # version: + pullPolicy: IfNotPresent + labels: {} + + # Create a priority class for the Artifactory pod or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 200 + extraConfig: 'acceptCount="100"' + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_bootstrap/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + # # Absolute path + # - source: /artifactory_bootstrap/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - access-audit.log + # - access-request.log + # - access-security-audit.log + # - access-service.log + # - artifactory-access.log + # - artifactory-event.log + # - artifactory-import-export.log + # - artifactory-request.log + # - artifactory-service.log + # - frontend-request.log + # - frontend-service.log + # - metadata-request.log + # - metadata-service.log + # - router-request.log + # - router-service.log + # - router-traefik.log + # - derby.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - tomcat-catalina.log + # - tomcat-localhost.log + + # Tomcat (catalina) loggers resources + catalinaLoggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 + ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd 
/opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + + ## Add custom init containers + customInitContainers: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: artifactory-volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # securityContext: + # runAsUser: 0 + # fsGroup: 0 + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: artifactory-volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. + dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Artifactory requires a unique master key. + ## You can generate one with the command: "openssl rand -hex 32" + ## An initial one is auto generated by Artifactory on first startup. 
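(Aside: a hedged sketch of the pre-created-secret route described in the master key comments here; the secret name is a placeholder, and the only hard requirement implied by the chart is that the secret's data key is named master-key.)

MASTER_KEY=$(openssl rand -hex 32)
kubectl create secret generic my-masterkey-secret --from-literal=master-key="${MASTER_KEY}"
# then point the chart at it, e.g. --set artifactory.masterKeySecretName=my-masterkey-secret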
+ # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + # masterKeySecretName: + + ## Join Key to connect other services to Artifactory + ## IMPORTANT: Setting this value overrides the existing joinKey + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + + # Add custom secrets - secret per file + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + + binarystore: + enabled: true + + ## admin allows to set the password for the default admin user. + ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate + admin: + ip: "127.0.0.1" + username: "admin" + password: + secret: + dataKey: + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. 
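(Aside: an illustration of the extraEnvironmentVariables override format, using names copied from the commented examples that follow; treat the values as placeholders to adjust.)

artifactory:
  extraEnvironmentVariables:
    - name: SERVER_XML_ARTIFACTORY_MAX_THREADS
      value: "200"
    - name: MY_SECRET_ENV_VAR
      valueFrom:
        secretKeyRef:
          name: my-secret-name
          key: my-secret-key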
+ ## Uncomment and set value as needed + extraEnvironmentVariables: + # - name: SERVER_XML_ARTIFACTORY_PORT + # value: "8081" + # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS + # value: "200" + # - name: SERVER_XML_ACCESS_MAX_THREADS + # value: "50" + # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_ACCESS_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_EXTRA_CONNECTOR + # value: "" + # - name: DB_POOL_MAX_ACTIVE + # value: "100" + # - name: DB_POOL_MAX_IDLE + # value: "10" + # - name: MY_SECRET_ENV_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret-name + # key: my-secret-key + + systemYaml: | + shared: + logging: + consoleLog: + enabled: {{ .Values.artifactory.consoleLog }} + extraJavaOpts: > + -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }} + {{- with .Values.artifactory.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + {{- if or .Values.database.type .Values.postgresql.enabled }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}" + driver: org.postgresql.Driver + username: "{{ .Values.postgresql.postgresqlUsername }}" + {{- else }} + type: "{{ .Values.database.type }}" + driver: "{{ .Values.database.driver }}" + {{- end }} + {{- end }} + artifactory: + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }} + access: + database: + maxOpenConnections: {{ .Values.access.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.access.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.access.tomcat.connector.extraConfig }} + metadata: + database: + maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }} + {{- if .Values.artifactory.replicator.enabled }} + replicator: + enabled: true + {{- end }} + + annotations: {} + + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... 
--set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + + ## The following setting are to configure a dedicated Ingress object for Replicator service + replicator: + enabled: false + ingress: + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + terminationGracePeriodSeconds: 30 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 90 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + mountPath: "/var/opt/jfrog/artifactory" + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + ## Storage default size. Should be increased for production deployments. + size: 20Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + ## Cache default size. Should be increased for production deployments. + maxCacheSize: 5000000000 + cacheProviderDir: cache + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## aws-s3-v3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. 
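(Aside: for the customBinarystoreXmlSecret option mentioned above, a minimal sketch; the secret name is a placeholder, and because the stateful set mounts the secret with subPath binarystore.xml, the data key must be named exactly binarystore.xml.)

kubectl create secret generic custom-binarystore --from-file=binarystore.xml=./my-binarystore.xml
# then install with: --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore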
+ binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" -}} + + + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{- end }} + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{- end }} + + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + {{- end }} + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .maxConnections }} + {{ . }} + {{- end }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + {{- if .useInstanceCredentials }} + true + {{- else }} + false + {{- end }} + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . 
}} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. + ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + + ## For artifactory.persistence.type file-system + fileSystem: + cache: + enabled: false + + ## For artifactory.persistence.type google-storage + googleStorage: + endpoint: storage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-gcp" + identity: + credential: + path: "artifactory/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + maxConnections: 50 + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! 
See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: AWS4-HMAC-SHA256 + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + testConnection: false + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + ## Annotations for the Persistent Volume Claim + annotations: {} + ## Uncomment the following resources definitions or pass them from command line + ## to control the cpu and memory resources allocated by the Kubernetes cluster + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + corePoolSize: 8 + # other: "" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + ssh: + enabled: false + internalPort: 1339 + externalPort: 1339 + +access: + ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. + ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates + ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes. + ## This ensures that the node to node communication is done over TLS. 
+ accessConfig: + security: + tls: false + + ## You can use a pre-existing secret by specifying customCertificatesSecretName + ## Example : Create a tls secret using `kubectl create secret tls --cert=ca.crt --key=ca.private.key` + # customCertificatesSecretName: + + ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key + # resetAccessCAKeys: false + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 50 + extraConfig: 'acceptCount="100"' + +metadata: + database: + maxOpenConnections: 80 + +# Nginx +nginx: + enabled: true + kind: Deployment + name: nginx + labels: {} + replicaCount: 1 + uid: 104 + gid: 107 + # Note that by default we use appVersion to get image tag/version + image: + repository: docker.bintray.io/jfrog/nginx-artifactory-pro + # version: + pullPolicy: IfNotPresent + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + + # Logs options + logs: + stderr: false + level: warn + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + + {{ if .Values.nginx.logs.stderr }} + error_log stderr {{ .Values.nginx.logs.level }}; + {{- else -}} + error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }}; + {{- end }} + pid /tmp/nginx.pid; + + {{- if .Values.artifactory.ssh.enabled }} + ## SSH Server Configuration + stream { + server { + listen {{ .Values.nginx.ssh.internalPort }}; + proxy_pass {{ include "artifactory.fullname" . }}:{{ .Values.artifactory.ssh.externalPort }}; + } + } + {{- end }} + + events { + worker_connections 1024; + } + + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 128k; + proxy_buffers 40 128k; + proxy_busy_buffers_size 128k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; + + } + + + artifactoryConf: | + {{- if .Values.nginx.https.enabled }} + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + {{- end }} + ## server configuration + server { + {{- if 
.Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?.+)\.{{ include "artifactory.fullname" . }} {{ include "artifactory.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?.+)\.{{ . }} + {{- end -}} + {{- end -}}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalPort }}/; + {{- if .Values.nginx.service.ssloffload}} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host; + {{- else }} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + {{- end }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + ssloffload: false + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. 
+ externalTrafficPolicy: Cluster + + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + + ssh: + internalPort: 1339 + externalPort: 1339 + + # DEPRECATED: The following will be removed in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 120 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory-ha.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + nodeSelector: {} + + tolerations: [] + + affinity: {} + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the PostgreSQL dependency sub-chart +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 10.13.0-debian-10-r38 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlExtendedConf: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + master: + nodeSelector: {} + affinity: {} + tolerations: [] + slave: + nodeSelector: {} + affinity: {} + tolerations: [] + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## specify custom database details here or leave empty and Artifactory will use embedded derby +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "rds-artifactory" + # key: "db-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. 
It assumes you have a logstash installed and configured properly. +filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.5.1 + logstashUrl: "logstash:5044" + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/logo/jcr-logo.png b/charts/artifactory-jcr/artifactory-jcr/2.5.100/logo/jcr-logo.png new file mode 100644 index 000000000..e0ed038ca Binary files /dev/null and b/charts/artifactory-jcr/artifactory-jcr/2.5.100/logo/jcr-logo.png differ diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/questions.yml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/questions.yml new file mode 100644 index 000000000..9cde42870 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/questions.yml @@ -0,0 +1,271 @@ +questions: +# Advance Settings +- variable: artifactory.artifactory.masterKey + default: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + description: "Artifactory master key. 
For security reasons, we strongly recommend you generate your own master key using this command: 'openssl rand -hex 32'" + type: string + label: Artifactory master key + group: "Security Settings" + +# Container Images +- variable: defaultImage + default: true + description: "Use default Docker image" + label: Use Default Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: artifactory.artifactory.image.repository + default: "docker.bintray.io/jfrog/artifactory-jcr" + description: "JFrog Container Registry image name" + type: string + label: JFrog Container Registry Image Name + - variable: artifactory.artifactory.image.version + default: "7.6.3" + description: "JFrog Container Registry image tag" + type: string + label: JFrog Container Registry Image Tag + - variable: artifactory.imagePullSecrets + description: "Image Pull Secret" + type: string + label: Image Pull Secret + +# Services and LoadBalancing Settings +- variable: artifactory.ingress.enabled + default: false + description: "Expose app using Layer 7 Load Balancer - ingress" + type: boolean + label: Expose app using Layer 7 Load Balancer + show_subquestion_if: true + group: "Services and Load Balancing" + required: true + subquestions: + - variable: artifactory.ingress.hosts[0] + default: "xip.io" + description: "Hostname to your artifactory installation" + type: hostname + required: true + label: Hostname + +# Nginx Settings +- variable: artifactory.nginx.enabled + default: true + description: "Enable nginx server" + type: boolean + label: Enable Nginx Server + group: "Services and Load Balancing" + required: true + show_if: "artifactory.ingress.enabled=false" +- variable: artifactory.nginx.service.type + default: "LoadBalancer" + description: "Nginx service type" + type: enum + required: true + label: Nginx Service Type + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" + options: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" +- variable: artifactory.nginx.service.loadBalancerIP + default: "" + description: "Provide Static IP to configure with Nginx" + type: string + label: Config Nginx LoadBalancer IP + show_if: "artifactory.nginx.enabled=true&&artifactory.nginx.service.type=LoadBalancer&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" +- variable: artifactory.nginx.tlsSecretName + default: "" + description: "Provide SSL Secret name to configure with Nginx" + type: string + label: Config Nginx SSL Secret + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" +- variable: artifactory.nginx.customArtifactoryConfigMap + default: "" + description: "Provide configMap name to configure Nginx with custom `artifactory.conf`" + type: string + label: ConfigMap for Nginx Artifactory Config + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" + +# Database Settings +- variable: artifactory.postgresql.enabled + default: true + description: "Enable PostgreSQL" + type: boolean + required: true + label: Enable PostgreSQL + group: "Database Settings" + show_subquestion_if: true + subquestions: + - variable: artifactory.postgresql.postgresqlPassword + default: "" + description: "PostgreSQL password" + type: password + required: true + label: PostgreSQL Password + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=true" + - variable: 
artifactory.postgresql.persistence.size + default: 20Gi + description: "PostgreSQL persistent volume size" + type: string + label: PostgreSQL Persistent Volume Size + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.persistence.storageClass + default: "" + description: "If undefined or null, uses the default StorageClass. Defaults to null" + type: storageclass + label: Default StorageClass for PostgreSQL + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.requests.cpu + default: "200m" + description: "PostgreSQL initial cpu request" + type: string + label: PostgreSQL Initial CPU Request + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.requests.memory + default: "500Mi" + description: "PostgreSQL initial memory request" + type: string + label: PostgreSQL Initial Memory Request + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.limits.cpu + default: "1" + description: "PostgreSQL cpu limit" + type: string + label: PostgreSQL CPU Limit + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.limits.memory + default: "1Gi" + description: "PostgreSQL memory limit" + type: string + label: PostgreSQL Memory Limit + show_if: "artifactory.postgresql.enabled=true" +- variable: artifactory.database.type + default: "postgresql" + description: "External database type (postgresql, mysql, oracle or mssql)" + type: enum + required: true + label: External Database Type + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" + options: + - "postgresql" + - "mysql" + - "oracle" + - "mssql" +- variable: artifactory.database.url + default: "" + description: "External database URL.
If you set the url, leave host and port empty" + type: string + label: External Database URL + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.host + default: "" + description: "External database hostname" + type: string + label: External Database Hostname + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.port + default: "" + description: "External database port" + type: string + label: External Database Port + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.user + default: "" + description: "External database username" + type: string + label: External Database Username + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.password + default: "" + description: "External database password" + type: password + label: External Database Password + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" + +# Advance Settings +- variable: artifactory.advancedOptions + default: false + description: "Show advanced configurations" + label: Show Advanced Configurations + type: boolean + show_subquestion_if: true + group: "Advanced Options" + subquestions: + - variable: artifactory.artifactory.primary.resources.requests.cpu + default: "500m" + description: "Artifactory primary node initial cpu request" + type: string + label: Artifactory Primary Node Initial CPU Request + - variable: artifactory.artifactory.primary.resources.requests.memory + default: "1Gi" + description: "Artifactory primary node initial memory request" + type: string + label: Artifactory Primary Node Initial Memory Request + - variable: artifactory.artifactory.primary.javaOpts.xms + default: "1g" + description: "Artifactory primary node java Xms size" + type: string + label: Artifactory Primary Node Java Xms Size + - variable: artifactory.artifactory.primary.resources.limits.cpu + default: "2" + description: "Artifactory primary node cpu limit" + type: string + label: Artifactory Primary Node CPU Limit + - variable: artifactory.artifactory.primary.resources.limits.memory + default: "4Gi" + description: "Artifactory primary node memory limit" + type: string + label: Artifactory Primary Node Memory Limit + - variable: artifactory.artifactory.primary.javaOpts.xmx + default: "4g" + description: "Artifactory primary node java Xmx size" + type: string + label: Artifactory Primary Node Java Xmx Size + - variable: artifactory.artifactory.node.resources.requests.cpu + default: "500m" + description: "Artifactory member node initial cpu request" + type: string + label: Artifactory Member Node Initial CPU Request + - variable: artifactory.artifactory.node.resources.requests.memory + default: "2Gi" + description: "Artifactory member node initial memory request" + type: string + label: Artifactory Member Node Initial Memory Request + - variable: artifactory.artifactory.node.javaOpts.xms + default: "1g" + description: "Artifactory member node java Xms size" + type: string + label: Artifactory Member Node Java Xms Size + - variable: artifactory.artifactory.node.resources.limits.cpu + default: "2" + description: "Artifactory member node cpu limit" + type: string + label: Artifactory Member Node CPU Limit + - variable: artifactory.artifactory.node.resources.limits.memory + default: "4Gi" + description: "Artifactory member node memory limit" + type: string + label: Artifactory Member 
Node Memory Limit + - variable: artifactory.artifactory.node.javaOpts.xmx + default: "4g" + description: "Artifactory member node java Xmx size" + type: string + label: Artifactory Member Node Java Xmx Size + +# Internal Settings +- variable: installerInfo + default: '\{\"productId\": \"RancherHelm_artifactory-jcr/7.6.3\", \"features\": \[\{\"featureId\": \"Partner/ACC-007246\"\}\]\}' + type: string + group: "Internal Settings (Do not modify)" diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.lock b/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.lock new file mode 100644 index 000000000..8191ebbac --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: artifactory + repository: https://charts.jfrog.io/ + version: 10.0.12 +digest: sha256:a201c886d1f8e9e58f2b0e1b55d7a03fc225f3774233f1f786523963c57bab33 +generated: "2020-07-29T16:48:47.031129463Z" diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.yaml new file mode 100644 index 000000000..1088ea3c0 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: artifactory + version: 10.0.12 + repository: https://charts.jfrog.io/ diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/templates/NOTES.txt b/charts/artifactory-jcr/artifactory-jcr/2.5.100/templates/NOTES.txt new file mode 100644 index 000000000..035bf8417 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/templates/NOTES.txt @@ -0,0 +1 @@ +Congratulations. You have just deployed JFrog Container Registry! diff --git a/charts/artifactory-jcr/artifactory-jcr/2.5.100/values.yaml b/charts/artifactory-jcr/artifactory-jcr/2.5.100/values.yaml new file mode 100644 index 000000000..ffa1b6045 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/2.5.100/values.yaml @@ -0,0 +1,74 @@ +# Default values for artifactory-jcr. +# This is a YAML-formatted file. + +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# This chart is based on the main artifactory chart with some customizations. +# See all supported configuration keys in https://github.com/jfrog/charts/tree/master/stable/artifactory + +## All values are under the 'artifactory' sub chart. +artifactory: + + ## Artifactory + ## See full list of supported Artifactory options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + artifactory: + ## Default version is from the artifactory sub-chart in the requirements.yaml + image: + repository: docker.bintray.io/jfrog/artifactory-jcr + # version: + + ## Uncomment the following resources definitions or pass them from command line + ## to control the cpu and memory resources allocated by the Kubernetes cluster + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "4Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory. + ## You should set them according to the resources set above. + ## IMPORTANT: Make sure resources.limits.memory is at least 1G more than Xmx. 
+ javaOpts: {} + # xms: "1g" + # xmx: "3g" + # other: "" + + installer: + platform: jcr-helm + + installerInfo: '{"productId": "Helm_artifactory-jcr/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + ## Nginx + ## See full list of supported Nginx options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + nginx: + enabled: true + tlsSecretName: "" + service: + type: LoadBalancer + + ## Ingress + ## See full list of supported Ingress options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + ingress: + enabled: false + tls: + + ## PostgreSQL + ## See list of supported postgresql options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + ## Configuration values for the PostgreSQL dependency sub-chart + ## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md + postgresql: + enabled: true + + ## This key is required for upgrades to protect old PostgreSQL chart's breaking changes. + databaseUpgradeReady: "yes" + + ## If NOT using the PostgreSQL in this chart (artifactory.postgresql.enabled=false), + ## specify custom database details here or leave empty and Artifactory will use embedded derby. + ## See full list of database options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + # database: + + +## Enable the PostgreSQL sub chart +postgresql: + enabled: true diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/CHANGELOG.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/CHANGELOG.md new file mode 100644 index 000000000..f56a5d644 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/CHANGELOG.md @@ -0,0 +1,151 @@ +# JFrog Container Registry Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [3.4.0] - Jan 4, 2021 +* Update dependency Artifactory chart version to 11.7.4 (Artifactory 7.12.5) + +## [3.3.1] - Dec 1, 2020 +* Update dependency Artifactory chart version to 11.5.4 (Artifactory 7.11.5) + +## [3.3.0] - Nov 23, 2020 +* Update dependency Artifactory chart version to 11.5.2 (Artifactory 7.11.2) + +## [3.2.2] - Nov 9, 2020 +* Update dependency Artifactory chart version to 11.4.5 (Artifactory 7.10.6) + +## [3.2.1] - Nov 2, 2020 +* Update dependency Artifactory chart version to 11.4.4 (Artifactory 7.10.5) + +## [3.2.0] - Oct 19, 2020 +* Update dependency Artifactory chart version to 11.4.0 (Artifactory 7.10.2) + +## [3.1.0] - Sep 30, 2020 +* Update dependency Artifactory chart version to 11.1.0 (Artifactory 7.9.0) + +## [3.0.2] - Sep 23, 2020 +* Updates readme + +## [3.0.1] - Sep 15, 2020 +* Update dependency Artifactory chart version to 11.0.1 (Artifactory 7.7.8) + +## [3.0.0] - Sep 14, 2020 +* **Breaking change:** Added `image.registry` and changed `image.version` to `image.tag` for docker images +* Update dependency Artifactory chart version to 11.0.0 (Artifactory 7.7.3) + +## [2.5.1] - Jul 29, 2020 +* Update dependency Artifactory chart version to 10.0.12 (Artifactory 7.6.3) + +## [2.5.0] - Jul 10, 2020 +* Update dependency Artifactory chart version to 10.0.3 (Artifactory 7.6.2) +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [2.4.0] - Jun 30, 2020 +* Update dependency Artifactory chart version to 9.6.0 (Artifactory 7.6.1) + +## [2.3.1] - Jun 12, 2020 +* Update dependency Artifactory chart version to 9.5.2 (Artifactory 7.5.7) + +## [2.3.0] - Jun 1, 2020 +* Update dependency Artifactory chart version to 9.5.0 (Artifactory 7.5.5) + +## [2.2.5] - May 27, 2020 +* Update dependency Artifactory chart version to 9.4.9 (Artifactory 7.4.3) + +## [2.2.4] - May 20, 2020 +* Update dependency Artifactory chart version to 9.4.6 (Artifactory 7.4.3) + +## [2.2.3] - May 07, 2020 +* Update dependency Artifactory chart version to 9.4.5 (Artifactory 7.4.3) +* Add `installerInfo` string format + +## [2.2.2] - Apr 28, 2020 +* Update dependency Artifactory chart version to 9.4.4 (Artifactory 7.4.3) + +## [2.2.1] - Apr 27, 2020 +* Update dependency Artifactory chart version to 9.4.3 (Artifactory 7.4.1) + +## [2.2.0] - Apr 14, 2020 +* Update dependency Artifactory chart version to 9.4.0 (Artifactory 7.4.1) + +## [2.1.6] - Apr 13, 2020 +* Update dependency Artifactory chart version to 9.3.1 (Artifactory 7.3.2) + +## [2.1.5] - Apr 8, 2020 +* Update dependency Artifactory chart version to 9.2.8 (Artifactory 7.3.2) + +## [2.1.4] - Mar 30, 2020 +* Update dependency Artifactory chart version to 9.2.3 (Artifactory 7.3.2) + +## [2.1.3] - Mar 30, 2020 +* Update dependency Artifactory chart version to 9.2.1 (Artifactory 7.3.2) + +## [2.1.2] - Mar 26, 2020 +* Update dependency Artifactory chart version to 9.1.5 (Artifactory 7.3.2) + +## [2.1.1] - Mar 25, 2020 +* Update dependency Artifactory chart version to 9.1.4 (Artifactory 7.3.2) + +## [2.1.0] - Mar 23, 2020 +* Update dependency Artifactory chart version to 9.1.3 (Artifactory 7.3.2) + +## [2.0.13] - Mar 19, 2020 +* Update dependency Artifactory chart version to 9.0.28 (Artifactory 7.2.1) + +## [2.0.12] - Mar 17, 2020 +* Update dependency Artifactory chart version to 9.0.26 (Artifactory 7.2.1) + +## [2.0.11] - Mar 11, 2020 +* Unified charts public release + +## [2.0.10] - Mar 8, 2020 +* Update dependency Artifactory chart
version to 9.0.20 (Artifactory 7.2.1) + +## [2.0.9] - Feb 26, 2020 +* Update dependency Artifactory chart version to 9.0.15 (Artifactory 7.2.1) + +## [2.0.0] - Feb 12, 2020 +* Update dependency Artifactory chart version to 9.0.0 (Artifactory 7.0.0) + +## [1.1.0] - Jan 19, 2020 +* Update dependency Artifactory chart version to 8.4.1 (Artifactory 6.17.0) + +## [1.1.1] - Feb 3, 2020 +* Update dependency Artifactory chart version to 8.4.4 + +## [1.1.0] - Jan 19, 2020 +* Update dependency Artifactory chart version to 8.4.1 (Artifactory 6.17.0) + +## [1.0.1] - Dec 31, 2019 +* Update dependency Artifactory chart version to 8.3.5 + +## [1.0.0] - Dec 23, 2019 +* Update dependency Artifactory chart version to 8.3.3 + +## [0.2.1] - Dec 12, 2019 +* Update dependency Artifactory chart version to 8.3.1 + +## [0.2.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [0.1.5] - Nov 28, 2019 +* Update dependency Artifactory chart version to 8.2.6 + +## [0.1.4] - Nov 20, 2019 +* Update Readme + +## [0.1.3] - Nov 20, 2019 +* Fix JCR logo url +* Update dependency to Artifactory 8.2.2 chart + +## [0.1.2] - Nov 20, 2019 +* Update JCR logo + +## [0.1.1] - Nov 20, 2019 +* Add `appVersion` to Chart.yaml + +## [0.1.0] - Nov 20, 2019 +* Initial release of the JFrog Container Registry helm chart diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/Chart.yaml new file mode 100644 index 000000000..c8a00025f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-jcr +apiVersion: v1 +appVersion: 7.12.5 +description: JFrog Container Registry +home: https://jfrog.com/container-registry/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-jcr/logo/jcr-logo.png +keywords: +- artifactory +- jfrog +- container +- registry +- devops +- jfrog-container-registry +maintainers: +- email: helm@jfrog.com + name: Chart Maintainers at JFrog +name: artifactory-jcr +sources: +- https://github.com/jfrog/charts +version: 3.4.000 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/LICENSE b/charts/artifactory-jcr/artifactory-jcr/3.4.000/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/README.md new file mode 100644 index 000000000..f573c93fe --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/README.md @@ -0,0 +1,131 @@ +# JFrog Container Registry Helm Chart + +JFrog Container Registry is a free Artifactory edition with support for Docker and Helm repositories. + +**Heads up: Our Helm Chart docs are moving to our main documentation site. For Artifactory installers, see [Installing Artifactory](https://www.jfrog.com/confluence/display/JFROG/Installing+Artifactory).** + +## Prerequisites Details + +* Kubernetes 1.12+ + +## Chart Details +This chart will do the following: + +* Deploy JFrog Container Registry +* Deploy an optional Nginx server +* Deploy an optional PostgreSQL Database +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client. + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +### Install Chart +To install the chart with the release name `jfrog-container-registry`: +```bash +helm upgrade --install jfrog-container-registry --set artifactory.postgresql.postgresqlPassword=<postgres_password> --namespace artifactory-jcr center/jfrog/artifactory-jcr +``` + +### Accessing JFrog Container Registry +**NOTE:** If using artifactory or nginx service type `LoadBalancer`, it might take a few minutes for JFrog Container Registry's public IP to become available. + +### Updating JFrog Container Registry +Once you have a new chart version, you can upgrade your deployment with: +```bash +helm upgrade jfrog-container-registry center/jfrog/artifactory-jcr +``` + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +Artifactory 6.x to 7.x upgrades require a one-time migration process. This is done automatically on pod startup if needed. +In extreme cases, it is possible to configure the migration timeout with the following configuration. The provided default should be more than enough for the migration to complete. +```yaml +artifactory: + artifactory: + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 +``` +* Note: If you are upgrading from 1.x to 3.x and above chart versions, please delete the existing PostgreSQL StatefulSet before upgrading the chart, due to breaking changes in the postgresql subchart. +```bash +kubectl delete statefulsets <release_name>-postgresql +``` +* For more details about artifactory chart upgrades, refer [here](https://github.com/jfrog/charts/blob/master/stable/artifactory/UPGRADE_NOTES.md) + +### Deleting JFrog Container Registry + +On helm v2: +```bash +helm delete --purge jfrog-container-registry +``` + +On helm v3: +```bash +helm delete jfrog-container-registry --namespace artifactory-jcr +``` + +This will delete your JFrog Container Registry deployment.
+**NOTE:** You might have left behind persistent volumes. You should explicitly delete them with +```bash +kubectl delete pvc ... +kubectl delete pv ... +``` + +## Database +The JFrog Container Registry chart comes with PostgreSQL deployed by default.
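To run against an external database instead of the bundled sub-chart, the `database` block of the underlying artifactory chart (shown in the values earlier in this change) can be overridden. The following is only a sketch: the JDBC URL, credentials, and driver class are placeholder assumptions you would replace for your own environment.

```yaml
# Sketch only: disable the bundled PostgreSQL and point the chart at an external database.
# Key names follow the artifactory sub-chart values shown earlier; host and credentials are placeholders.
artifactory:
  postgresql:
    enabled: false
  database:
    type: postgresql
    driver: org.postgresql.Driver          # assumed standard PostgreSQL JDBC driver class
    url: jdbc:postgresql://db.example.internal:5432/artifactory
    user: artifactory
    password: change-me
postgresql:
  enabled: false                           # top-level switch for the PostgreSQL sub-chart
```

With `waitForDatabase` left at its default of `true`, the pods will wait for this external database to become reachable before starting.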
+For details on the PostgreSQL configuration or customising the database, Look at the options described in the [Artifactory helm chart](https://github.com/jfrog/charts/tree/master/stable/artifactory). + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add these two lines to your Helm command: +```bash +helm upgrade --install jfrog-container-registry \ + --set artifactory.nginx.enabled=false \ + --set artifactory.ingress.enabled=true \ + --set artifactory.ingress.hosts[0]="artifactory.company.com" \ + --set artifactory.artifactory.service.type=NodePort \ + --namespace artifactory-jcr center/jfrog/artifactory-jcr +``` + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml +artifactory: + artifactory: + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - jfrog-container-registry.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - jfrog-container-registry.domain.com +``` + +## Useful links +https://www.jfrog.com +https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/app-readme.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/app-readme.md new file mode 100644 index 000000000..9d9b7d85f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/app-readme.md @@ -0,0 +1,18 @@ +# JFrog Container Registry Helm Chart + +Universal Repository Manager supporting all major packaging formats, build tools and CI servers. + +## Chart Details +This chart will do the following: + +* Deploy JFrog Container Registry +* Deploy an optional Nginx server +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + + +## Useful links +Blog: [Herd Trust Into Your Rancher Labs Multi-Cloud Strategy with Artifactory](https://jfrog.com/blog/herd-trust-into-your-rancher-labs-multi-cloud-strategy-with-artifactory/) + +## Activate Your Artifactory Instance +Don't have a license? Please send an email to [rancher-jfrog-licenses@jfrog.com](mailto:rancher-jfrog-licenses@jfrog.com) to get it. + diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/.helmignore b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/CHANGELOG.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/CHANGELOG.md new file mode 100644 index 000000000..9a35fa0e6 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/CHANGELOG.md @@ -0,0 +1,905 @@ +# JFrog Artifactory Chart Changelog +All changes to this chart will be documented in this file. + +## [11.7.4] - Jan 04, 2020 +* Fixed gid support for statefulset + +## [11.7.3] - Dec 31, 2020 +* Added gid support for statefulset +* Add setSecurityContext flag to allow securityContext block to be removed from artifactory statefulset + +## [11.7.2] - Dec 29, 2020 +* **Important:** Removed `.Values.metrics` and `.Values.fluentd` (Fluentd and Prometheus integrations) +* Add support for creating additional kubernetes resources - [refer here](https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-values.yaml) +* Updated Artifactory version to 7.12.5 + +## [11.7.1] - Dec 21, 2020 +* Updated Artifactory version to 7.12.3 + +## [11.7.0] - Dec 18, 2020 +* Updated Artifactory version to 7.12.2 +* Added `.Values.artifactory.openMetrics.enabled` + +## [11.6.1] - Dec 11, 2020 +* Added configurable `.Values.global.versions.artifactory` in values.yaml + +## [11.6.0] - Dec 10, 2020 +* Update postgresql tag version to `12.5.0-debian-10-r25` +* Fixed `artifactory.persistence.googleStorage.endpoint` from `storage.googleapis.com` to `commondatastorage.googleapis.com` +* Updated chart maintainers email + +## [11.5.5] - Dec 4, 2020 +* **Important:** Renamed `.Values.systemYaml` to `.Values.systemYamlOverride` + +## [11.5.4] - Dec 1, 2020 +* Improve error message returned when attempting helm upgrade command + +## [11.5.3] - Nov 30, 2020 +* Updated Artifactory version to 7.11.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) + +## [11.5.2] - Nov 23, 2020 +* Updated Artifactory version to 7.11.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.11) +* Updated port namings on services and pods to allow for istio protocol discovery +* Change semverCompare checks to support hosted Kubernetes +* Add flag to disable creation of ServiceMonitor when enabling prometheus metrics +* Prevent the PostHook command to be executed if the user did not specify a command in the values file +* Fix issue with tls file generation when nginx.https.enabled is false + +## [11.5.1] - Nov 19, 2020 +* Updated Artifactory version to 7.11.2 +* Bugfix - access.config.import.xml override Access Federation configurations + +## [11.5.0] - Nov 17, 2020 +* Updated Artifactory version to 7.11.1 +* Update alpine tag version to `3.12.1` + +## [11.4.6] - Nov 10, 2020 +* Pass system.yaml via external secret for advanced usecases +* Added support for custom ingress +* Bugfix - stateful set not picking up changes to database secrets + +## [11.4.5] - Nov 9, 2020 +* Updated Artifactory version to 7.10.6 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.6) + +## [11.4.4] - Nov 2, 2020 +* Add enablePathStyleAccess property for aws-s3-v3 binary provider 
template + +## [11.4.3] - Nov 2, 2020 +* Updated Artifactory version to 7.10.5 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.5) + +## [11.4.2] - Oct 22, 2020 +* Chown bug fix where Linux capability cannot chown all files causing log line warnings +* Fix Frontend timeout linting issue + +## [11.4.1] - Oct 20, 2020 +* Add flag to disable prepare-custom-persistent-volume init container + +## [11.4.0] - Oct 19, 2020 +* Updated Artifactory version to 7.10.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.10.2) + +## [11.3.2] - Oct 15, 2020 +* Add support to specify priorityClassName for nginx deployment + +## [11.3.1] - Oct 9, 2020 +* Add support for customInitContainersBegin + +## [11.3.0] - Oct 7, 2020 +* Updated Artifactory version to 7.9.1 +* **Breaking change:** Fix `storageClass` to correct `storageClassName` in values.yaml + +## [11.2.0] - Oct 5, 2020 +* Expose Prometheus metrics via a ServiceMonitor +* Parse log files for metric data with Fluentd + +## [11.1.0] - Sep 30, 2020 +* Updated Artifactory version to 7.9.0 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.9) +* Added support for resources in init container + +## [11.0.11] - Sep 25, 2020 +* Update to use linux capability CAP_CHOWN instead of root base init container to avoid any use of root containers to pass Redhat security requirements + +## [11.0.10] - Sep 28, 2020 +* Setting chart coordinates in migitation yaml + +## [11.0.9] - Sep 25, 2020 +* Update filebeat version to `7.9.2` + +## [11.0.8] - Sep 24, 2020 +* Fixed broken issue - when setting `waitForDatabase: false` container startup still waits for DB + +## [11.0.7] - Sep 22, 2020 +* Readme updates + +## [11.0.6] - Sep 22, 2020 +* Fix lint issue in migitation yaml + +## [11.0.5] - Sep 22, 2020 +* Fix broken migitation yaml + +## [11.0.4] - Sep 21, 2020 +* Added mitigation yaml for Artifactory - [More info](https://github.com/jfrog/chartcenter/blob/master/docs/securitymitigationspec.md) + +## [11.0.3] - Sep 17, 2020 +* Added configurable session(UI) timeout in frontend microservice + +## [11.0.2] - Sep 17, 2020 +* Added proper required text to be shown while postgres upgrades + +## [11.0.1] - Sep 14, 2020 +* Updated Artifactory version to 7.7.8 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7.8) + +## [11.0.0] - Sep 2, 2020 +* **Breaking change:** Changed `imagePullSecrets`values from string to list. +* **Breaking change:** Added `image.registry` and changed `image.version` to `image.tag` for docker images +* Added support for global values +* Updated maintainers in chart.yaml +* Update postgresql tag version to `12.3.0-debian-10-r71` +* Update postgresql chart version to `9.3.4` in requirements.yaml - [9.x Upgrade Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#900) +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! 
+* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass previous 9.x/10.x's postgresql.image.tag and databaseUpgradeReady=true + +## [10.1.0] - Aug 13, 2020 +* Updated Artifactory version to 7.7.3 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.7) + +## [10.0.15] - Aug 10, 2020 +* Added enableSignedUrlRedirect for persistent storage type aws-s3-v3. + +## [10.0.14] - Jul 31, 2020 +* Update the README section on Nginx SSL termination to reflect the actual YAML structure. + +## [10.0.13] - Jul 30, 2020 +* Added condition to disable the migration scripts. + +## [10.0.12] - Jul 28, 2020 +* Document Artifactory node affinity. + +## [10.0.11] - Jul 28, 2020 +* Added maxConnections for persistent storage type aws-s3-v3. + +## [10.0.10] - Jul 28, 2020 +* Bugfix / support for userPluginSecrets with Artifactory 7 + +## [10.0.9] - Jul 27, 2020 +* Add tpl to external database secrets +* Modified `scheme` to `artifactory.scheme` + +## [10.0.8] - Jul 23, 2020 +* Added condition to disable the migration init container. + +## [10.0.7] - Jul 21, 2020 +* Updated Artifactory Chart to add node and primary labels to pods and service objects. + +## [10.0.6] - Jul 20, 2020 +* Support custom CA and certificates + +## [10.0.5] - Jul 13, 2020 +* Updated Artifactory version to 7.6.3 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.3 +* Fixed Mysql database jar path in `preStartCommand` in README + +## [10.0.4] - Jul 10, 2020 +* Move some postgresql values to where they should be according to the subchart + +## [10.0.3] - Jul 8, 2020 +* Set Artifactory access client connections to the same value as the access threads + +## [10.0.2] - Jul 6, 2020 +* Updated Artifactory version to 7.6.2 +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [10.0.1] - Jul 01, 2020 +* Add dedicated ingress object for Replicator service when enabled + +## [10.0.0] - Jun 30, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update busybox tag version to `1.31.1` +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! 
+* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [9.6.0] - Jun 29, 2020 +* Updated Artifactory version to 7.6.1 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.6.1 +* Add tpl for external database secrets + +## [9.5.5] - Jun 25, 2020 +* Stop loading the Nginx stream module because it is now a core module + +## [9.5.4] - Jun 25, 2020 +* Notes.txt update - add --namespace parameter + +## [9.5.3] - Jun 11, 2020 +* Support list of custom secrets + +## [9.5.2] - Jun 12, 2020 +* Updated Artifactory version to 7.5.7 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5.7 + +## [9.5.1] - Jun 8, 2020 +* Readme update - configuring Artifactory with oracledb + +## [9.5.0] - Jun 1, 2020 +* Updated Artifactory version to 7.5.5 - https://www.jfrog.com/confluence/display/JFROG/Artifactory+Release+Notes#ArtifactoryReleaseNotes-Artifactory7.5 +* Fixes bootstrap configMap permission issue +* Update postgresql tag version to `9.6.18-debian-10-r7` + +## [9.4.9] - May 27, 2020 +* Added Tomcat maxThreads & acceptCount + +## [9.4.8] - May 25, 2020 +* Fixed postgresql README `image` Parameters + +## [9.4.7] - May 24, 2020 +* Fixed typo in README regarding migration timeout + +## [9.4.6] - May 19, 2020 +* Added metadata maxOpenConnections + +## [9.4.5] - May 07, 2020 +* Fix `installerInfo` string format + +## [9.4.4] - Apr 27, 2020 +* Updated Artifactory version to 7.4.3 + +## [9.4.3] - Apr 26, 2020 +* Change order of the customInitContainers to run before the "migration-artifactory" initContainer. + +## [9.4.2] - Apr 24, 2020 +* Fix `artifactory.persistence.awsS3V3.useInstanceCredentials` incorrect conditional logic +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [9.4.1] - Apr 16, 2020 +* Custom volumes in migration init container. + +## [9.4.0] - Apr 14, 2020 +* Updated Artifactory version to 7.4.1 + +## [9.3.1] - April 13, 2020 +* Update README with helm v3 commands + +## [9.3.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml +* Bump postgresql tag version to `9.6.17-debian-10-r21` in values.yaml + +## [9.2.9] - Apr 8, 2020 +* Added recommended ingress annotation to avoid 413 errors + +## [9.2.8] - Apr 8, 2020 +* Moved migration scripts under `files` directory +* Support preStartCommand in migration Init container as `artifactory.migration.preStartCommand` + +## [9.2.7] - Apr 6, 2020 +* Fix cache size (should be 5gb instead of 50gb since volume claim is only 20gb). 
+ +## [9.2.6] - Apr 1, 2020 +* Support masterKey and joinKey as secrets + +## [9.2.5] - Apr 1, 2020 +* Fix readme use to `-hex 32` instead of `-hex 16` + +## [9.2.4] - Mar 31, 2020 +* Change the way the artifactory `command:` is set so it will properly pass a SIGTERM to java + +## [9.2.3] - Mar 29, 2020 +* Add Nginx log options: stderr as logfile and log level + +## [9.2.2] - Mar 30, 2020 +* Use the same defaulting mechanism used for the artifactory version used elsewhere in the chart + +## [9.2.1] - Mar 29, 2020 +* Fix loggers sidecars configurations to support new file system layout and new log names + +## [9.2.0] - Mar 29, 2020 +* Fix broken admin user bootstrap configuration +* **Breaking change:** renamed `artifactory.accessAdmin` to `artifactory.admin` + +## [9.1.5] - Mar 26, 2020 +* Fix volumeClaimTemplate issue + +## [9.1.4] - Mar 25, 2020 +* Fix volume name used by filebeat container + +## [9.1.3] - Mar 24, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [9.1.2] - Mar 22, 2020 +* Support for SSL offload in Nginx service(LoadBalancer) layer. Introduced `nginx.service.ssloffload` field with boolean type. + +## [9.1.1] - Mar 23, 2020 +* Moved installer info to values.yaml so it is fully customizable + +## [9.1.0] - Mar 23, 2020 +* Updated Artifactory version to 7.3.2 + +## [9.0.29] - Mar 20, 2020 +* Add support for masterKey trim during 6.x to 7.x migration if 6.x masterKey is 32 hex (64 characters) + +## [9.0.28] - Mar 18, 2020 +* Increased Nginx proxy_buffers size + +## [9.0.27] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files +* useInstanceCredentials variable was declared in S3 settings but not used in chart. Now it is being used. + +## [9.0.26] - Mar 17, 2020 +* Fix rendering of Service Account annotations + +## [9.0.25] - Mar 16, 2020 +* Update Artifactory readme with extra ingress annotations needed for Artifactory to be set as SSO provider + +## [9.0.24] - Mar 16, 2020 +* Add Unsupported message from 6.18 to 7.2.x (migration) + +## [9.0.23] - Mar 12, 2020 +* Fix README.md rendering issue + +## [9.0.22] - Mar 11, 2020 +* Upgrade Docs update + +## [9.0.21] - Mar 11, 2020 +* Unified charts public release + +## [9.0.20] - Mar 6, 2020 +* Fix path to `/artifactory_bootstrap` +* Add support for controlling the name of the ingress and allow to set more than one cname + +## [9.0.19] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [9.0.18] - Feb 28, 2020 +* Add support to process `valueFrom` for extraEnvironmentVariables + +## [9.0.17] - Feb 26, 2020 +* Fix join key secret naming + +## [9.0.16] - Feb 26, 2020 +* Store join key to secret + +## [9.0.15] - Feb 26, 2020 +* Updated Artifactory version to 7.2.1 + +## [9.0.10] - Feb 07, 2020 +* Remove protection flag `databaseUpgradeReady` which was added to check internal postgres upgrade + +## [9.0.0] - Feb 07, 2020 +* Updated Artifactory version to 7.0.0 + +## [8.4.8] - Feb 13, 2020 +* Add support for SSH authentication to Artifactory + +## [8.4.7] - Feb 11, 2020 +* Change Artifactory service port name to be hard-coded to `http` instead of using `{{ .Release.Name }}` + +## [8.4.6] - Feb 9, 2020 +* Add support for `tpl` in the `postStartCommand` + +## [8.4.5] - Feb 4, 2020 +* Support customisable Nginx kind + +## [8.4.4] - Feb 2, 2020 +* Add a comment stating that it is recommended to use an external PostgreSQL with a static password for production installations + +## [8.4.3] - Jan 30, 2020 +* Add the 
option to configure resources for the logger containers + +## [8.4.2] - Jan 26, 2020 +* Improve `database.user` and `database.password` logic in order to support more use cases and make the configuration less repetitive + +## [8.4.1] - Jan 19, 2020 +* Fix replicator port config in nginx replicator configmap + +## [8.4.0] - Jan 19, 2020 +* Updated Artifactory version to 6.17.0 + +## [8.3.6] - Jan 16, 2020 +* Added example for external nginx-ingress + +## [8.3.5] - Dec 30, 2019 +* Fix for nginx probes failing when launched with http disabled + +## [8.3.4] - Dec 24, 2019 +* Better support for custom `artifactory.internalPort` + +## [8.3.3] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [8.3.2] - Dec 16, 2019 +* Fix for toggling nginx service ports + +## [8.3.1] - Dec 12, 2019 +* Add support for toggling nginx service ports + +## [8.3.0] - Dec 1, 2019 +* Updated Artifactory version to 6.16.0 + +## [8.2.6] - Nov 28, 2019 +* Add support for using existing PriorityClass + +## [8.2.5] - Nov 27, 2019 +* Add support for PriorityClass + +## [8.2.4] - Nov 21, 2019 +* Add an option to use a file system cache-fs with the file-system binarystore template + +## [8.2.3] - Nov 20, 2019 +* Update Artifactory Readme + +## [8.2.2] - Nov 20, 2019 +* Update Artfactory logo + +## [8.2.1] - Nov 18, 2019 +* Add the option to provide service account annotations (in order to support stuff like https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html) + +## [8.2.0] - Nov 18, 2019 +* Updated Artifactory version to 6.15.0 + +## [8.1.11] - Nov 17, 2019 +* Do not provide a default master key. Allow it to be auto generated by Artifactory on first startup + +## [8.1.10] - Nov 17, 2019 +* Fix creation of double slash in nginx artifactory configuration + +## [8.1.9] - Nov 14, 2019 +* Set explicit `postgresql.postgresqlPassword=""` to avoid helm v3 error + +## [8.1.8] - Nov 12, 2019 +* Updated Artifactory version to 6.14.1 + +## [8.1.7] - Nov 9, 2019 +* Additional documentation for masterKey + +## [8.1.6] - Nov 10, 2019 +* Update PostgreSQL chart version to 7.0.1 +* Use formal PostgreSQL configuration format + +## [8.1.5] - Nov 8, 2019 +* Add support `artifactory.service.loadBalancerSourceRanges` for whitelisting when setting `artifactory.service.type=LoadBalancer` + +## [8.1.4] - Nov 6, 2019 +* Add support for any type of environment variable by using `extraEnvironmentVariables` as-is + +## [8.1.3] - Nov 6, 2019 +* Add nodeselector support for Postgresql + +## [8.1.2] - Nov 5, 2019 +* Add support for the aws-s3-v3 filestore, which adds support for pod IAM roles + +## [8.1.1] - Nov 4, 2019 +* When using `copyOnEveryStartup`, make sure that the target base directories are created before copying the files + +## [8.1.0] - Nov 3, 2019 +* Updated Artifactory version to 6.14.0 + +## [8.0.1] - Nov 3, 2019 +* Make sure the artifactory pod exits when one of the pre-start stages fail + +## [8.0.0] - Oct 27, 2019 +**IMPORTANT - BREAKING CHANGES!**
+**DOWNTIME MIGHT BE REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), must use the upgrade instructions in [UPGRADE_NOTES.md](UPGRADE_NOTES.md)! +* PostgreSQL sub chart was upgraded to version `6.5.x`. This version is **not backward compatible** with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations + +## [7.18.3] - Oct 24, 2019 +* Change the preStartCommand to support templating + +## [7.18.2] - Oct 21, 2019 +* Add support for setting `artifactory.labels` +* Add support for setting `nginx.labels` + +## [7.18.1] - Oct 10, 2019 +* Updated Artifactory version to 6.13.1 + +## [7.18.0] - Oct 7, 2019 +* Updated Artifactory version to 6.13.0 + +## [7.17.5] - Sep 24, 2019 +* Option to skip wait-for-db init container with '--set waitForDatabase=false' + +## [7.17.4] - Sep 11, 2019 +* Updated Artifactory version to 6.12.2 + +## [7.17.3] - Sep 9, 2019 +* Updated Artifactory version to 6.12.1 + +## [7.17.2] - Aug 22, 2019 +* Fix the nginx server_name directive used with ingress.hosts + +## [7.17.1] - Aug 21, 2019 +* Enable the Artifactory container's liveness and readiness probes + +## [7.17.0] - Aug 21, 2019 +* Updated Artifactory version to 6.12.0 + +## [7.16.11] - Aug 14, 2019 +* Updated Artifactory version to 6.11.6 + +## [7.16.10] - Aug 11, 2019 +* Fix Ingress routing and add an example + +## [7.16.9] - Aug 5, 2019 +* Do not mount `access/etc/bootstrap.creds` unless user specifies a custom password or secret (Access already generates a random password if not provided one) +* If custom `bootstrap.creds` is provided (using keys or custom secret), prepare it with an init container so the temp file does not persist + +## [7.16.8] - Aug 4, 2019 +* Improve binarystore config + 1. Convert to a secret + 2. Move config to values.yaml + 3. 
Support an external secret + +## [7.16.7] - Jul 29, 2019 +* Don't create the nginx configmaps when nginx.enabled is false + +## [7.16.6] - Jul 24, 2019 +* Simplify nginx setup and shorten initial wait for probes + +## [7.16.5] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [7.16.4] - Jul 22, 2019 +* Updated Artifactory version to 6.11.3 + +## [7.16.3] - Jul 11, 2019 +* Add ingress.hosts to the Nginx server_name directive when ingress is enabled to help with Docker repository sub domain configuration + +## [7.16.2] - Jul 3, 2019 +* Fix values key in reverse proxy example + +## [7.16.1] - Jul 1, 2019 +* Updated Artifactory version to 6.11.1 + +## [7.16.0] - Jun 27, 2019 +* Update Artifactory version to 6.11 and add restart to Artifactory when bootstrap.creds file has been modified + +## [7.15.8] - Jun 27, 2019 +* Add the option for changing nginx config using values.yaml and remove outdated reverse proxy documentation + +## [7.15.6] - Jun 24, 2019 +* Update chart maintainers + +## [7.15.5] - Jun 24, 2019 +* Change Nginx to point to the artifactory externalPort + +## [7.15.4] - Jun 23, 2019 +* Add the option to provide an IP for the access-admin endpoints + +## [7.15.3] - Jun 23, 2019 +* Add values files for small, medium and large installations + +## [7.15.2] - Jun 20, 2019 +* Add missing terminationGracePeriodSeconds to values.yaml + +## [7.15.1] - Jun 19, 2019 +* Updated Artifactory version to 6.10.4 + +## [7.15.0] - Jun 17, 2019 +* Use configmaps for nginx configuration and remove nginx postStart command + +## [7.14.8] - Jun 18, 2019 +* Add the option to provide additional ingress rules + +## [7.14.7] - Jun 14, 2019 +* Updated readme with improved external database setup example + +## [7.14.6] - Jun 11, 2019 +* Updated Artifactory version to 6.10.3 +* Updated installer-info template + +## [7.14.5] - Jun 6, 2019 +* Updated Google Cloud Storage API URL and https settings + +## [7.14.4] - Jun 5, 2019 +* Delete the db.properties file on Artifactory startup + +## [7.14.3] - Jun 3, 2019 +* Updated Artifactory version to 6.10.2 + +## [7.14.2] - May 21, 2019 +* Updated Artifactory version to 6.10.1 + +## [7.14.1] - May 19, 2019 +* Fix missing logger image tag + +## [7.14.0] - May 7, 2019 +* Updated Artifactory version to 6.10.0 + +## [7.13.21] - May 5, 2019 +* Add support for setting `artifactory.async.corePoolSize` + +## [7.13.20] - May 2, 2019 +* Remove unused property `artifactory.releasebundle.feature.enabled` + +## [7.13.19] - May 1, 2019 +* Fix indentation issue with the replicator system property + +## [7.13.18] - Apr 30, 2019 +* Add support for JMX monitoring + +## [7.13.17] - Apr 25, 2019 +* Added support for `cacheProviderDir` + +## [7.13.16] - Apr 18, 2019 +* Changing API StatefulSet version to `v1` and permission fix for custom `artifactory.conf` for Nginx + +## [7.13.15] - Apr 16, 2019 +* Updated documentation for Reverse Proxy Configuration + +## [7.13.14] - Apr 15, 2019 +* Added support for `customVolumeMounts` + +## [7.13.13] - Aprl 12, 2019 +* Added support for `bucketExists` flag for googleStorage + +## [7.13.12] - Apr 11, 2019 +* Replace `curl` examples with `wget` due to the new base image + +## [7.13.11] - Aprl 07, 2019 +* Add support for providing the Artifactory license as a parameter + +## [7.13.10] - Apr 10, 2019 +* Updated Artifactory version to 6.9.1 + +## [7.13.9] - Aprl 04, 2019 +* Add support for templated extraEnvironmentVariables + +## [7.13.8] - Aprl 07, 2019 +* Change network policy API group + +## [7.13.7] - Aprl 04, 
2019 +* Bugfix for userPluginSecrets + +## [7.13.6] - Apr 4, 2019 +* Add information about upgrading Artifactory with auto-generated postgres password + +## [7.13.5] - Aprl 03, 2019 +* Added installer info + +## [7.13.4] - Aprl 03, 2019 +* Allow secret names for user plugins to contain template language + +## [7.13.3] - Apr 02, 2019 +* Allow NetworkPolicy configurations (defaults to allow all) + +## [7.13.2] - Aprl 01, 2019 +* Add support for user plugin secret + +## [7.13.1] - Mar 27, 2019 +* Add the option to copy a list of files to ARTIFACTORY_HOME on startup + +## [7.13.0] - Mar 26, 2019 +* Updated Artifactory version to 6.9.0 + +## [7.12.18] - Mar 25, 2019 +* Add CI tests for persistence, ingress support and nginx + +## [7.12.17] - Mar 22, 2019 +* Add the option to change the default access-admin password + +## [7.12.16] - Mar 22, 2019 +* Added support for `.Probe.path` to customise the paths used for health probes + +## [7.12.15] - Mar 21, 2019 +* Added support for `artifactory.customSidecarContainers` to create custom sidecar containers +* Added support for `artifactory.customVolumes` to create custom volumes + +## [7.12.14] - Mar 21, 2019 +* Make ingress path configurable + +## [7.12.13] - Mar 19, 2019 +* Move the copy of bootstrap config from postStart to preStart + +## [7.12.12] - Mar 19, 2019 +* Fix existingClaim example + +## [7.12.11] - Mar 18, 2019 +* Add information about nginx persistence + +## [7.12.10] - Mar 15, 2019 +* Wait for nginx configuration file before using it + +## [7.12.9] - Mar 15, 2019 +* Revert securityContext changes since they were causing issues + +## [7.12.8] - Mar 15, 2019 +* Fix issue #247 (init container failing to run) + +## [7.12.7] - Mar 14, 2019 +* Updated Artifactory version to 6.8.7 +* Add support for Artifactory-CE for C++ + +## [7.12.6] - Mar 13, 2019 +* Move securityContext to container level + +## [7.12.5] - Mar 11, 2019 +* Updated Artifactory version to 6.8.6 + +## [7.12.4] - Mar 8, 2019 +* Fix existingClaim option + +## [7.12.3] - Mar 5, 2019 +* Updated Artifactory version to 6.8.4 + +## [7.12.2] - Mar 4, 2019 +* Add support for catalina logs sidecars + +## [7.12.1] - Feb 27, 2019 +* Updated Artifactory version to 6.8.3 + +## [7.12.0] - Feb 25, 2019 +* Add nginx support for tail sidecars + +## [7.11.1] - Feb 20, 2019 +* Added support for enterprise storage + +## [7.10.2] - Feb 19, 2019 +* Updated Artifactory version to 6.8.2 + +## [7.10.1] - Feb 17, 2019 +* Updated Artifactory version to 6.8.1 +* Add example of `SERVER_XML_EXTRA_CONNECTOR` usage + +## [7.10.0] - Feb 15, 2019 +* Updated Artifactory version to 6.8.0 + +## [7.9.6] - Feb 13, 2019 +* Updated Artifactory version to 6.7.3 + +## [7.9.5] - Feb 12, 2019 +* Add support for tail sidecars to view logs from k8s api + +## [7.9.4] - Feb 6, 2019 +* Fix support for customizing statefulset `terminationGracePeriodSeconds` + +## [7.9.3] - Feb 5, 2019 +* Add instructions on how to deploy Artifactory with embedded Derby database + +## [7.9.2] - Feb 5, 2019 +* Add support for customizing statefulset `terminationGracePeriodSeconds` + +## [7.9.1] - Feb 3, 2019 +* Updated Artifactory version to 6.7.2 + +## [7.9.0] - Jan 23, 2019 +* Updated Artifactory version to 6.7.0 + +## [7.8.9] - Jan 22, 2019 +* Added support for `artifactory.customInitContainers` to create custom init containers + +## [7.8.8] - Jan 17, 2019 +* Added support of values ingress.labels + +## [7.8.7] - Jan 16, 2019 +* Mount replicator.yaml (config) directly to /replicator_extra_conf + +## [7.8.6] - Jan 13, 2019 +* Fix documentation 
about nginx group id + +## [7.8.5] - Jan 13, 2019 +* Updated Artifactory version to 6.6.5 + +## [7.8.4] - Jan 8, 2019 +* Make artifactory.replicator.publicUrl required when the replicator is enabled + +## [7.8.3] - Jan 1, 2019 +* Updated Artifactory version to 6.6.3 +* Add support for `artifactory.extraEnvironmentVariables` to pass more environment variables to Artifactory + +## [7.8.2] - Dec 28, 2018 +* Fix location `replicator.yaml` is copied to + +## [7.8.1] - Dec 27, 2018 +* Updated Artifactory version to 6.6.1 + +## [7.8.0] - Dec 20, 2018 +* Updated Artifactory version to 6.6.0 + +## [7.7.13] - Dec 17, 2018 +* Updated Artifactory version to 6.5.13 + +## [7.7.12] - Dec 12, 2018 +* Fix documentation about Artifactory license setup using secret + +## [7.7.11] - Dec 10, 2018 +* Fix issue when using existing claim + +## [7.7.10] - Dec 5, 2018 +* Remove Distribution certificates creation. + +## [7.7.9] - Nov 30, 2018 +* Updated Artifactory version to 6.5.9 + +## [7.7.8] - Nov 29, 2018 +* Updated postgresql version to 9.6.11 + +## [7.7.7] - Nov 27, 2018 +* Updated Artifactory version to 6.5.8 + +## [7.7.6] - Nov 19, 2018 +* Added support for configMap to use custom Reverse Proxy Configuration with Nginx + +## [7.7.5] - Nov 14, 2018 +* Fix location of `nodeSelector`, `affinity` and `tolerations` + +## [7.7.4] - Nov 14, 2018 +* Updated Artifactory version to 6.5.3 + +## [7.7.3] - Nov 12, 2018 +* Support artifactory.preStartCommand for running command before entrypoint starts + +## [7.7.2] - Nov 7, 2018 +* Support database.url parameter (DB_URL) + +## [7.7.1] - Oct 29, 2018 +* Change probes port to 8040 (so they will not be blocked when all tomcat threads on 8081 are exhausted) + +## [7.7.0] - Oct 28, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [7.6.8] - Oct 23, 2018 +* Fix providing external secret for database credentials + +## [7.6.7] - Oct 23, 2018 +* Allow user to configure externalTrafficPolicy for Loadbalancer + +## [7.6.6] - Oct 22, 2018 +* Updated ingress annotation support (with examples) to support docker registry v2 + +## [7.6.5] - Oct 21, 2018 +* Updated Artifactory version to 6.5.2 + +## [7.6.4] - Oct 19, 2018 +* Allow providing pre-existing secret containing master key +* Allow arbitrary annotations on primary and member node pods +* Enforce size limits when using local storage with `emptyDir` +* Allow providing pre-existing secrets containing external database credentials + +## [7.6.3] - Oct 18, 2018 +* Updated Artifactory version to 6.5.1 + +## [7.6.2] - Oct 17, 2018 +* Add Apache 2.0 license + +## [7.6.1] - Oct 11, 2018 +* Supports master-key in the secrets and stateful-set +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) + +## [7.6.0] - Oct 11, 2018 +* Updated Artifactory version to 6.5.0 + +## [7.5.4] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [7.5.3] - Oct 4, 2018 +* Add PostgreSQL resources template + +## [7.5.2] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [7.5.1] - Oct 2, 2018 +* Set Artifactory to 6.4.1 + +## [7.5.0] - Sep 27, 2018 +* Set Artifactory to 6.4.0 + +## [7.4.3] - Sep 26, 2018 +* Add ci/test-values.yaml + +## [7.4.2] - Sep 2, 2018 +* Updated Artifactory version to 6.3.2 +* Removed unused PVC + +## [7.4.0] - Aug 22, 2018 +* Added support to run as non root +* Updated Artifactory version to 6.2.0 + +## [7.3.0] - Aug 22, 2018 +* Enabled RBAC Support +* Added support for PostStartCommand (To download Database 
JDBC connector) +* Increased postgresql max_connections +* Added support for `nginx.conf` ConfigMap +* Updated Artifactory version to 6.1.0 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/Chart.yaml new file mode 100644 index 000000000..884564cc9 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +appVersion: 7.12.5 +description: Universal Repository Manager supporting all major packaging formats, build tools and CI servers. +home: https://www.jfrog.com/artifactory/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory/logo/artifactory-logo.png +keywords: +- artifactory +- jfrog +- devops +maintainers: +- email: installers@jfrog.com + name: Chart Maintainers at JFrog +name: artifactory +sources: +- https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view +- https://github.com/jfrog/charts +version: 11.7.4 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/LICENSE b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/README.md new file mode 100644 index 000000000..9c7551c92 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/README.md @@ -0,0 +1,1151 @@ +# JFrog Artifactory Helm Chart + +**Heads up: Our Helm Chart docs are moving to our main documentation site. 
For Artifactory installers, see [Installing Artifactory](https://www.jfrog.com/confluence/display/JFROG/Installing+Artifactory).** + +## Prerequisites Details + +* Kubernetes 1.12+ +* Artifactory Pro trial license [get one from here](https://www.jfrog.com/artifactory/free-trial/) + +## Chart Details +This chart will do the following: + +* Deploy Artifactory-Pro/Artifactory-Edge (or OSS/CE if custom image is set) +* Deploy a PostgreSQL database using the stable/postgresql chart (can be changed) **NOTE:** For production grade installations it is recommended to use an external PostgreSQL. +* Deploy an optional Nginx server +* Optionally expose Artifactory with Ingress [Ingress documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) + +## Installing the Chart + +### Add ChartCenter Helm repository + +Before installing JFrog helm charts, you need to add the [ChartCenter helm repository](https://chartcenter.io) to your helm client + +```bash +helm repo add center https://repo.chartcenter.io +helm repo update +``` + +### Install Chart +To install the chart with the release name `artifactory`: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory +``` + +### System Configuration +Artifactory uses a common system configuration file - `system.yaml`. See [official documentation](https://www.jfrog.com/confluence/display/JFROG/System+YAML+Configuration+File) on its usage. +In order to override the default `system.yaml` configuration, do the following: +```yaml +artifactory: + systemYaml: | + +``` + +### Deploying Artifactory with embedded Derby database +By default, this chart deploys Artifactory with PostgreSQL (running in a separate pod). +It's possible to deploy Artifactory without PostgreSQL (or any other external database), which will default to the embedded [Derby database](https://db.apache.org/derby/). +```bash +# Disable the default postgresql +helm upgrade --install artifactory --set postgresql.enabled=false --namespace artifactory center/jfrog/artifactory +``` +Artifactory will start with its embedded Derby database. + +### Accessing Artifactory +**NOTE:** It might take a few minutes for Artifactory's public IP to become available. +Follow the instructions outputted by the install command to get the Artifactory IP to access it. + +### Updating Artifactory +Once you have a new chart version, you can update your deployment with +```bash +helm upgrade artifactory --namespace artifactory center/jfrog/artifactory +``` + +If artifactory was installed without providing a value to postgresql.postgresqlPassword (a password was autogenerated), follow these instructions: +1. Get the current password by running: +```bash +POSTGRES_PASSWORD=$(kubectl get secret -n -postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode) +``` +2. Upgrade the release by passing the previously auto-generated secret: +```bash +helm upgrade center/jfrog/artifactory --set postgresql.postgresqlPassword=${POSTGRES_PASSWORD} --namespace +``` + +This will apply any configuration changes on your existing deployment. + +### Special Upgrade Notes +#### Artifactory upgrade from 6.x to 7.x (App Version) +Artifactory 6.x to 7.x upgrade requires a one-time migration process. This is done automatically on pod startup if needed. +It's possible to configure the migration timeout with the following configuration in extreme cases. The provided default should be more than enough for completion of the migration.
+```yaml +artifactory: + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 +``` +* Note: If you are upgrading from 8.x to 11.x and above chart versions, please delete the existing statefulset of postgresql before upgrading the chart due to breaking changes in postgresql subchart. +```bash +kubectl delete statefulsets -postgresql +``` + +### Artifactory memory and CPU resources +The Artifactory Helm chart comes with support for configured resource requests and limits to Artifactory, Nginx and PostgreSQL. By default, these settings are commented out. +It is **highly** recommended to set these so you have full control of the allocated resources and limits. +Artifactory java memory parameters can (and should) also be set to match the allocated resources with `artifactory.javaOpts.xms` and `artifactory.javaOpts.xmx`. +```bash +# Example of setting resource requests and limits to all pods (including passing java memory settings to Artifactory) +helm upgrade --install artifactory \ + --set artifactory.resources.requests.cpu="500m" \ + --set artifactory.resources.limits.cpu="2" \ + --set artifactory.resources.requests.memory="1Gi" \ + --set artifactory.resources.limits.memory="4Gi" \ + --set artifactory.javaOpts.xms="1g" \ + --set artifactory.javaOpts.xmx="4g" \ + --set nginx.resources.requests.cpu="100m" \ + --set nginx.resources.limits.cpu="250m" \ + --set nginx.resources.requests.memory="250Mi" \ + --set nginx.resources.limits.memory="500Mi" \ + --namespace artifactory center/jfrog/artifactory +``` +Get more details on configuring Artifactory in the [official documentation](https://www.jfrog.com/confluence/). + +Although it is possible to set resources limits and requests this way, it is recommended to use the pre-built values files +for small, medium and large installation and change them according to your needs (if necessary), as described [here](#Deploying-Artifactory-for-small/medium/large-installations) + +### Deploying Artifactory for small/medium/large installations +In the chart directory, we have added three values files, one for each installation type - small/medium/large. These values files are recommendations for setting resources requests and limits for your installation. The values are derived from the following [documentation](https://www.jfrog.com/confluence/display/EP/Installing+on+Kubernetes#InstallingonKubernetes-Systemrequirements). You can find them in the corresponding chart directory - values-small.yaml, values-medium.yaml and values-large.yaml + + +### Artifactory storage +When using an enterprise license. Artifactory supports a wide range of storage back ends. You can see more details on [Artifactory Filestore options](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore) + +In this chart, you set the type of storage you want with `artifactory.persistence.type` and pass the required configuration settings. +The default storage in this chart is the `file-system` replication, where the data is replicated to all nodes. + +#### NFS +To use an NFS server as your cluster's storage, you need to +- Setup an NFS server. Get its IP as `NFS_IP` +- Create a `data` and `backup` directories on the NFS exported directory with write permissions to all +- Pass NFS parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=nfs \ +--set artifactory.persistence.nfs.ip=${NFS_IP} \ +... 
+``` + +#### Using a network file system with the file-system persistence type +In some cases, it is not possible for the helm chart to set up your NFS mounts automatically for Artifactory. +In such cases, such as when using AWS EFS, you will use the `artifactory.persistence.type=file-system` even though your underlying persistence is actually a network file system. +The same thing applies when using a slow storage device (such as cheap disks) as your main storage solution for Artifactory. +This means that serving highly used files from the network file system/slow storage can take time, +and that's why you would want a cache filesystem that's stored locally on disk (fast disks like SSD). + +This is how you would configure it: +Create a values file with the following content: +1. Set up your volume mount to your fast storage device +```yaml +artifactory: + ## Set up your volume mount to your fast storage device + customVolumes: | + - name: my-cache-fast-storage + persistentVolumeClaim: + claimName: my-cache-fast-storage-pvc + ## Enable caching and configure the cache directory + customVolumeMounts: | + - name: my-cache-fast-storage + mountPath: /my-fast-cache-mount + ## Install the helm chart with the values file you created + persistence: + cacheProviderDir: /my-fast-cache-mount + fileSystem: + cache: + enabled: true + +``` +2. Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory --namespace artifactory -f values.yaml +``` + +#### Google Storage +To use a Google Storage bucket as the cluster's filestore, see [Google Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-GoogleStorageBinaryProvider) +- Pass Google Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=google-storage \ +--set artifactory.persistence.googleStorage.identity=${GCP_ID} \ +--set artifactory.persistence.googleStorage.credential=${GCP_KEY} \ +... +``` + +#### AWS S3 +**NOTE** Keep in mind that when using the `aws-s3` persistence type, you will not be able to provide an IAM role on the pod level. +In order to grant permissions to Artifactory using an IAM role, you will have to attach that IAM role to the machine(s) on which Artifactory is running. +This is due to the fact that the `aws-s3` template uses the `JetS3t` library to interact with AWS. If you want to grant an IAM role at the pod level, see the `AWS S3 V3` section. + +To use an AWS S3 bucket as the cluster's filestore, see [S3 Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-S3BinaryProvider) +- Pass AWS S3 parameters to `helm install` and `helm upgrade` +```bash +... +# With explicit credentials: +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3.credential=${AWS_SECRET_ACCESS_KEY} \ +... + +... +# With using existing IAM role +--set artifactory.persistence.type=aws-s3 \ +--set artifactory.persistence.awsS3.endpoint=${AWS_S3_ENDPOINT} \ +--set artifactory.persistence.awsS3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3.roleName=${AWS_ROLE_NAME} \ +... +``` +**NOTE:** Make sure S3 `endpoint` and `region` match.
See [AWS documentation on endpoint](https://docs.aws.amazon.com/general/latest/gr/rande.html) + +#### AWS S3 V3 +To use an AWS S3 bucket as the cluster's filestore and access it with the official AWS SDK, see [S3 Official SDK Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AmazonS3OfficialSDKTemplate). +This filestore template uses the official AWS SDK, unlike the `aws-s3` implementation that uses the `JetS3t` library. +Use this template if you want to attach an IAM role to the Artifactory pod directly (as opposed to attaching it to the machine(s) that Artifactory will run on). + +**NOTE** This will have to be combined with a k8s mechanism for attaching IAM roles to pods, like [kube2iam](https://github.com/helm/charts/tree/master/stable/kube2iam) or anything similar. + +- Pass AWS S3 V3 parameters and the annotation pointing to the IAM role (when using an IAM role; this is kube2iam-specific and may vary depending on the implementation) to `helm install` and `helm upgrade` + +```bash +# With explicit credentials: +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.persistence.awsS3V3.identity=${AWS_ACCESS_KEY_ID} \ +--set artifactory.persistence.awsS3V3.credential=${AWS_SECRET_ACCESS_KEY} \ +... +``` + +```bash +# With using existing IAM role +--set artifactory.persistence.type=aws-s3-v3 \ +--set artifactory.persistence.awsS3V3.region=${AWS_REGION} \ +--set artifactory.persistence.awsS3V3.bucketName=${AWS_S3_BUCKET_NAME} \ +--set artifactory.annotations.'iam\.amazonaws\.com/role'=${AWS_IAM_ROLE_ARN} +... +``` + +To enable [Direct Cloud Storage Download](https://www.jfrog.com/confluence/display/JFROG/Direct+Cloud+Storage+Download#DirectCloudStorageDownload-1.ConfiguretheArtifactoryFilestore) +```bash +... +--set artifactory.persistence.awsS3V3.enableSignedUrlRedirect=true \ +... +``` + +#### Microsoft Azure Blob Storage +To use Azure Blob Storage as the cluster's filestore, see [Azure Blob Storage Binary Provider](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore#ConfiguringtheFilestore-AzureBlobStorageClusterBinaryProvider) +- Pass Azure Blob Storage parameters to `helm install` and `helm upgrade` +```bash +... +--set artifactory.persistence.type=azure-blob \ +--set artifactory.persistence.azureBlob.accountName=${AZURE_ACCOUNT_NAME} \ +--set artifactory.persistence.azureBlob.accountKey=${AZURE_ACCOUNT_KEY} \ +--set artifactory.persistence.azureBlob.endpoint=${AZURE_ENDPOINT} \ +--set artifactory.persistence.azureBlob.containerName=${AZURE_CONTAINER_NAME} \ +... +``` + +* To use a persistent volume claim as cache dir together with Azure Blob Storage, additionally pass the following parameters to `helm install` and `helm upgrade` (make sure `mountPath` and `cacheProviderDir` point to the same location) +```bash +... +--set artifactory.persistence.existingClaim=${YOUR_CLAIM} \ +--set artifactory.persistence.mountPath=/opt/cache-dir \ +--set artifactory.persistence.cacheProviderDir=/opt/cache-dir \ +... +``` + +#### Custom binarystore.xml +You have an option to provide a custom [binarystore.xml](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Filestore).
+There are two options for this + +1. Editing directly in [values.yaml](values.yaml) +```yaml +artifactory: + persistence: + binarystoreXml: | + + + + + +``` + +2. Create your own [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and pass it to your `helm install` command +```yaml +# Prepare your custom Secret file (custom-binarystore.yaml) +kind: Secret +apiVersion: v1 +metadata: + name: custom-binarystore + labels: + app: artifactory + chart: artifactory +stringData: + binarystore.xml: |- + + + + +``` + +```bash +# Create a secret from the file +kubectl apply -n artifactory -f ./custom-binarystore.yaml + +# Pass it to your helm install command: +helm upgrade --install artifactory --namespace artifactory --set artifactory.persistence.customBinarystoreXmlSecret=custom-binarystore center/jfrog/artifactory +``` + +### Create a unique Master Key +Artifactory requires a unique master key. By default the chart has one set in values.yaml (`artifactory.masterKey`). + +**For production grade installations it is strongly recommended to use a custom master key. If you initially use the default master key it will be very hard to change the master key at a later stage** +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique one and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Pass the created master key to helm +helm upgrade --install artifactory --set artifactory.masterKey=${MASTER_KEY} --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you can create a secret containing the master key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export MASTER_KEY=$(openssl rand -hex 32) +echo ${MASTER_KEY} + +# Create a secret containing the key. The key in the secret must be named master-key +kubectl create secret generic my-secret --from-literal=master-key=${MASTER_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory --set artifactory.masterKeySecretName=my-secret --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** In either case, make sure to pass the same master key on all future calls to `helm install` and `helm upgrade`! In the first case, this means always passing `--set artifactory.masterKey=${MASTER_KEY}`. In the second, this means always passing `--set artifactory.masterKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Special Upgrade Notes +### MasterKey during 6.x to 7.x Migration (App version) + +**NOTE:** 6.x only supports masterKey with 16 hex (32 characters) and if you have set masterKey using `openssl rand -hex 32` (64 characters) in 6.x, only the first 32 characters are used and rest are ignored. Hence, during 6.x to 7.x migration, we trim first 32 characters and set masterkey, which implies 7.x still uses the trimmed masterkey of 6.x. Hence, `artifactory.masterKey` should not be passed during migration from 6.x to 7.x. + +### Create a unique Join Key +Artifactory requires a unique join key. By default the chart has one set in values.yaml (`artifactory.joinKey`). + +**This key is for demo purpose and should not be used in a production environment!** + +You should generate a unique key and pass it to the template at install/upgrade time. 
+```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Pass the created join key to helm +helm upgrade --install artifactory --set artifactory.joinKey=${JOIN_KEY} --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you can create a secret containing the join key manually and pass it to the template at install/upgrade time. +```bash +# Create a key +export JOIN_KEY=$(openssl rand -hex 32) +echo ${JOIN_KEY} + +# Create a secret containing the key. The key in the secret must be named join-key +kubectl create secret generic my-secret --from-literal=join-key=${JOIN_KEY} + +# Pass the created secret to helm +helm upgrade --install artifactory --set artifactory.joinKeySecretName=my-secret --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** In either case, make sure to pass the same join key on all future calls to `helm install` and `helm upgrade`! This means always passing `--set artifactory.joinKey=${JOIN_KEY}`. In the second, this means always passing `--set artifactory.joinKeySecretName=my-secret` and ensuring the contents of the secret remain unchanged. + +### Customizing Database password +You can override the specified database password (set in [values.yaml](values.yaml)), by passing it as a parameter in the install command line +```bash +helm upgrade --install artifactory --namespace artifactory --set postgresql.postgresqlPassword=12_hX34qwerQ2 center/jfrog/artifactory +``` + +You can customise other parameters in the same way, by passing them on `helm install` command line. + +### Deleting Artifactory + +On helm v2: +```bash +helm delete --purge artifactory +``` + +On helm v3: +```bash +helm delete artifactory --namespace artifactory +``` +This will completely delete your Artifactory Pro deployment. +**IMPORTANT:** This will also delete your data volumes. You will lose all data! + +### Kubernetes Secret for Artifactory License +##### Use an existing secret +You can deploy the Artifactory license as a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/). +Prepare a text file with the license written in it and create a Kubernetes secret from it. +```bash +# Create the Kubernetes secret (assuming the local license file is 'art.lic') +kubectl create secret generic artifactory-license --from-file=./art.lic + +# Pass the license to helm +helm upgrade --install artifactory --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=art.lic --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. +If you want to keep managing the artifactory license using the same method, you can use the copyOnEveryStartup example shown in the values.yaml file + +##### Create the secret as part of the helm release +values.yaml +```yaml +artifactory: + license: + licenseKey: |- + +``` + +```bash +helm upgrade --install artifactory -f values.yaml --namespace artifactory center/jfrog/artifactory +``` +**NOTE:** This method is relevant for initial deployment only! Once Artifactory is deployed, you should not keep passing these parameters as the license is already persisted into Artifactory's storage (they will be ignored). +Updating the license should be done via Artifactory UI or REST API. 
+If you want to keep managing the Artifactory license using the same method, you can use the copyOnEveryStartup example shown in the [values.yaml](values.yaml) file. + + +### copyOnEveryStartup feature +Files stored in the `/artifactory-extra-conf` directory are only copied to the `ARTIFACTORY_HOME/etc` directory upon the first startup. +In some cases, you want your configuration files to be copied to the `ARTIFACTORY_HOME/etc` directory on every startup. +Two examples for that would be: + +1. The binarystore.xml file. If you use the default behaviour, your binarystore.xml configuration will only be copied on the first startup, +which means that changes you make over time to the `binarystoreXml` configuration will not be applied. In order to make sure your changes are applied on every startup, do the following: +Create a values file with the following values: +```yaml +artifactory: + copyOnEveryStartup: + - source: /artifactory_bootstrap/binarystore.xml + target: etc/artifactory/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f values.yaml +``` + +2. Any custom configuration file you use to configure Artifactory, such as `logback.xml`: +Create a config map with your `logback.xml` configuration. + +Create a values file with the following values: +```yaml +artifactory: + ## Create a volume pointing to the config map with your configuration file + customVolumes: | + - name: logback-xml-configmap + configMap: + name: logback-xml-configmap + customVolumeMounts: | + - name: logback-xml-configmap + mountPath: /tmp/artifactory-logback/ + copyOnEveryStartup: + - source: /tmp/artifactory-logback/* + target: etc/ +``` + +Install the helm chart with the values file you created: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f values.yaml +``` + +### Configure NetworkPolicy + +NetworkPolicy specifies what ingress and egress is allowed in this namespace. You are encouraged to be as specific as possible to increase the security of the system. + +In the `networkpolicy` section of values.yaml you can specify a list of NetworkPolicy objects. + +For podSelector, ingress and egress, if nothing is provided, then a default of `- {}` is applied, which allows everything. + +A full (but very wide open) example that results in 2 NetworkPolicy objects being created: +```yaml +networkpolicy: + # Allows all ingress and egress to/from artifactory. + - name: artifactory + podSelector: + matchLabels: + app: artifactory + egress: + - {} + ingress: + - {} + # Allows connectivity from artifactory pods to postgresql pods, but no traffic leaving postgresql pod. + - name: postgres + podSelector: + matchLabels: + app: postgresql + ingress: + - from: + - podSelector: + matchLabels: + app: artifactory +``` +### Artifactory JMX Configuration +You can see some information about the exposed MBeans here - https://www.jfrog.com/confluence/display/RTF/Artifactory+JMX+MBeans + +Enable JMX in your deployment: +```bash +helm upgrade --install artifactory \ + --set artifactory.javaOpts.jmx.enabled=true \ + --namespace artifactory center/jfrog/artifactory +``` +This will enable access to Artifactory with JMX on the default port (9010). +You have the option to change the port by setting `artifactory.javaOpts.jmx.port` to your choice of port. + +In order to connect to Artifactory using JMX with jconsole (or any similar tool) installed on your computer, follow these steps: +1.
Enable JMX as described above and change the Artifactory service to be of type LoadBalancer: +```bash +helm upgrade --install artifactory \ + --set artifactory.javaOpts.jmx.enabled=true \ + --set artifactory.service.type=LoadBalancer \ + --namespace artifactory center/jfrog/artifactory + +``` +2. The default setting for java.rmi.server.hostname is the service name (this is also configurable with `artifactory.javaOpts.jmx.host`). +So, in order to connect to Artifactory with jconsole, you should map the Artifactory Kubernetes service IP to the service name using your hosts file as such: +``` + artifactory- +``` +3. Launch jconsole with the service address and port: +```bash +jconsole artifactory-: +``` + +### Bootstrapping Artifactory admin password +You can bootstrap the `admin` user password as described in the [bootstrap Artifactory admin credentials](https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate) guide. + +1. Create `admin-creds-values.yaml` and provide the IP (by default 127.0.0.1) and password: +```yaml +artifactory: + admin: + ip: "" # Example: "*" to allow access from anywhere + username: "admin" + password: "" +``` + +2. Apply the `admin-creds-values.yaml` file: +```bash +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f admin-creds-values.yaml +``` + +3. Restart the Artifactory pod (`kubectl delete pod `) + +### Bootstrapping Artifactory configuration +**IMPORTANT:** Bootstrapping Artifactory requires a license. Pass the license as shown in the section above. + +* User guide to [bootstrap Artifactory Global Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheGlobalConfiguration) +* User guide to [bootstrap Artifactory Security Configuration](https://www.jfrog.com/confluence/display/RTF/Configuration+Files#ConfigurationFiles-BootstrappingtheSecurityConfiguration) + +1. Create `bootstrap-config.yaml` with artifactory.config.import.xml and security.import.xml as shown below: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-release-bootstrap-config +data: + artifactory.config.import.xml: | + + security.import.xml: | + +``` + +2. Create the configMap in Kubernetes: +```bash +kubectl apply -f bootstrap-config.yaml +``` +3. Pass the configMap to helm: +```bash +helm upgrade --install artifactory --set artifactory.license.secret=artifactory-license,artifactory.license.dataKey=art.lic,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory center/jfrog/artifactory +``` +OR +```bash +helm upgrade --install artifactory --set artifactory.license.licenseKey=,artifactory.configMapName=my-release-bootstrap-config --namespace artifactory center/jfrog/artifactory +``` + +### Use custom nginx.conf with Nginx + +Steps to create a configMap with nginx.conf: +* Create an `nginx.conf` file. +```bash +kubectl create configmap nginx-config --from-file=nginx.conf +``` +* Pass the configMap to helm install +```bash +helm upgrade --install artifactory --set nginx.customConfigMap=nginx-config --namespace artifactory center/jfrog/artifactory +``` + +### Use an external Database + +**For production grade installations it is recommended to use an external PostgreSQL with a static password** + +#### PostgreSQL +There are cases where you will want to use an external PostgreSQL with a different database name, e.g. `my-artifactory-db`. In that case, you need to set a custom PostgreSQL connection URL, where `my-artifactory-db` is the name of the database.
+ +This can be done with the following parameters +```bash +... +--set postgresql.enabled=false \ +--set database.type=postgresql \ +--set database.driver=org.postgresql.Driver \ +--set database.url='jdbc:postgresql://${DB_HOST}:${DB_PORT}/my-artifactory-db' \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! + +#### Other DB type +There are cases where you will want to use a different database and not the enclosed **PostgreSQL**. +See more details on [configuring the database](https://www.jfrog.com/confluence/display/RTF/Configuring+the+Database) +> The official Artifactory Docker images include the PostgreSQL database driver. +> For other database types, you will have to add the relevant database driver to Artifactory's tomcat/lib + +This can be done with the following parameters +```bash +# Make sure your Artifactory Docker image has the MySQL database driver in it +... +--set postgresql.enabled=false \ +--set artifactory.preStartCommand="mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" \ +--set database.type=mysql \ +--set database.driver=com.mysql.jdbc.Driver \ +--set database.url=${DB_URL} \ +--set database.user=${DB_USER} \ +--set database.password=${DB_PASSWORD} \ +... +``` +**NOTE:** You must set `postgresql.enabled=false` in order for the chart to use the `database.*` parameters. Without it, they will be ignored! +##### Configuring Artifactory with external Oracle database +To use Artifactory with an Oracle database, the required Oracle Instant Client library files and libaio must be copied to Artifactory's tomcat lib directory. You also need to set the LD_LIBRARY_PATH environment variable. +1. Create a values file with the configuration +```yaml +postgresql: + enabled: false +database: + type: oracle + driver: oracle.jdbc.OracleDriver + url: + user: + password: +artifactory: + preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O instantclient-basic-linux.x64-19.6.0.0.0dbru.zip https://download.oracle.com/otn_software/linux/instantclient/19600/instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && unzip -jn instantclient-basic-linux.x64-19.6.0.0.0dbru.zip && wget -O libaio1_0.3.110-3_amd64.deb http://ftp.br.debian.org/debian/pool/main/liba/libaio/libaio1_0.3.110-3_amd64.deb && dpkg-deb -x libaio1_0.3.110-3_amd64.deb . && cp lib/x86_64-linux-gnu/* ." + extraEnvironmentVariables: + - name: LD_LIBRARY_PATH + value: /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib +``` +2.
Install Artifactory with the values file you created: +```bash +helm upgrade --install artifactory center/jfrog/artifactory --namespace artifactory -f values-oracle.yaml +``` +**NOTE:** If it's an upgrade from 6.x to 7.x, add the same `preStartCommand` under `artifactory.migration.preStartCommand` + +#### Using pre-existing Kubernetes Secret +If you store your database credentials in a pre-existing Kubernetes `Secret`, you can specify them via `database.secrets` instead of `database.user` and `database.password`: +```bash +# Create a secret containing the database credentials +kubectl create secret generic my-secret --from-literal=user=${DB_USER} --from-literal=password=${DB_PASSWORD} +... +--set postgresql.enabled=false \ +--set database.secrets.user.name=my-secret \ +--set database.secrets.user.key=user \ +--set database.secrets.password.name=my-secret \ +--set database.secrets.password.key=password \ +--set database.secrets.url.name=my-secret \ +--set database.secrets.url.key=url \ +... +``` + +### Deleting Artifactory +To delete Artifactory: + +On helm v2: +```bash +helm delete --purge artifactory +``` + +On helm v3: +```bash +helm delete artifactory --namespace artifactory +``` +This will completely delete your Artifactory deployment. + +### Custom Docker registry for your images +If you need to pull your Docker images from a private registry, you need to create a +[Kubernetes Docker registry secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) and pass it to helm +```bash +# Create a Docker registry secret called 'regsecret' +kubectl create secret docker-registry regsecret --docker-server= --docker-username= --docker-password= --docker-email= +``` +Once created, pass it to `helm`: +```bash +helm upgrade --install artifactory --set imagePullSecrets=regsecret --namespace artifactory center/jfrog/artifactory +``` + +### Logger sidecars +This chart provides the option to add sidecars to tail various logs from Artifactory. See the available values in [values.yaml](values.yaml) + +Get the list of containers in the pod: +```bash +kubectl get pods -n -o jsonpath='{.spec.containers[*].name}' | tr ' ' '\n' +``` + +View a specific log: +```bash +kubectl logs -n -c +``` + +### Custom init containers +There are cases where a special, unsupported init process is needed, like checking something on the file system or testing something before spinning up the main container. + +For this, there is a section for writing a custom init container in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom init containers + customInitContainers: | + ## Init containers template goes here ## +``` + +### Custom sidecar containers +There are cases where an extra sidecar container is needed. For example, monitoring agents or log collection. + +For this, there is a section for writing a custom sidecar container in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + ## Add custom sidecar containers + customSidecarContainers: | + ## Sidecar containers template goes here ## +``` + +### Custom volumes +If you need to use a custom volume in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom volumes in the [values.yaml](values.yaml).
By default it's commented out +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + ## Custom volume comes here ## +``` + +### Custom secrets +If you need to add a custom secret in a custom init or sidecar container, you can use this option. + +For this, there is a section for defining custom secrets in the [values.yaml](values.yaml). By default it's commented out +```yaml +artifactory: + # Add custom secrets - secret per file + customSecrets: + - name: custom-secret + key: custom-secret.yaml + data: > + secret data +``` + +To use a custom secret, you need to define a custom volume. +```yaml +artifactory: + ## Add custom volumes + customVolumes: | + - name: custom-secret + secret: + secretName: custom-secret +``` + +To use the volume, you need to define a volume mount as part of a custom init or sidecar container. +```yaml +artifactory: + customSidecarContainers: + - name: side-car-container + volumeMounts: + - name: custom-secret + mountPath: /opt/custom-secret.yaml + subPath: custom-secret.yaml + readOnly: true +``` + +You can configure the sidecar to run as a custom user if needed by setting the following in the container template: +```yaml + # Example of running container as root (id 0) + securityContext: + runAsUser: 0 + fsGroup: 0 +``` + +### Add Artifactory User Plugin during installation +If you need to add an [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins), you can use this option. + +Create a secret with the [Artifactory User Plugin](https://github.com/jfrog/artifactory-user-plugins) using the following command: +```bash +# Secret with single user plugin +kubectl create secret generic archive-old-artifacts --from-file=archiveOldArtifacts.groovy --namespace=artifactory + +# Secret with single user plugin with configuration file +kubectl create secret generic webhook --from-file=webhook.groovy --from-file=webhook.config.json.sample --namespace=artifactory +``` + +Add the plugin secret names to `plugins.yaml` as follows: +```yaml +artifactory: + userPluginSecrets: + - archive-old-artifacts + - webhook +``` + +You can now pass the created `plugins.yaml` file to the helm install command to deploy Artifactory with user plugins as follows: +```bash +helm upgrade --install artifactory -f plugins.yaml --namespace artifactory center/jfrog/artifactory +``` + +Alternatively, you may be in a situation in which you would like to create a secret in a Helm chart that depends on this chart. In this scenario, the name of the secret is likely dynamically generated via template functions, so passing a statically named secret isn't possible. In this case, the chart supports evaluating strings as templates via the [`tpl`](https://helm.sh/docs/charts_tips_and_tricks/#using-the-tpl-function) function - simply pass the raw string containing the templating language used to name your secret as a value instead by adding the following to your chart's `values.yaml` file: +```yaml +artifactory: # Name of the artifactory dependency + artifactory: + userPluginSecrets: + - '{{ template "my-chart.fullname" . }}' +``` +For additional information, please refer [here](https://www.jfrog.com/confluence/display/JFROG/User+Plugins). + +### Provide custom configMaps to Artifactory +If you want to mount a custom file to Artifactory, either an init shell script or a custom configuration file (such as `logback.xml`), you can use this option.
+ +Create a `configmaps.yaml` file with the following content: +```yaml +artifactory: + configMaps: | + logback.xml: | + + + + + %date [%-5level] \(%-20c{3}:%L\) %message%n + + + + + + + + + + + + + + + my-custom-post-start-hook.sh: | + echo "This is my custom post start hook" + + customVolumeMounts: | + - name: artifactory-configmaps + mountPath: /tmp/my-config-map + + postStartCommand: | + chmod +x /tmp/my-config-map/my-custom-post-start-hook.sh; + /tmp/my-config-map/my-custom-post-start-hook.sh; + + copyOnEveryStartup: + - source: /tmp/my-config-map/logback.xml + target: etc/ + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory -f configmaps.yaml --namespace artifactory center/jfrog/artifactory +``` + +This will, in turn: +* create a configMap with the files you specified above +* create a volume pointing to the configMap with the name `artifactory-configmaps` +* Mount said configMap onto `/tmp/my-config-map` using a `customVolumeMounts` +* Set the shell script we mounted as the `postStartCommand` +* Copy the `logback.xml` file to its proper location in the `$ARTIFACTORY_HOME/etc` directory. + +### Establishing TLS and Adding certificates +In HTTPS, the communication protocol is encrypted using Transport Layer Security (TLS). By default, TLS between JFrog Platform nodes is disabled. +When TLS is enabled, JFrog Access acts as the Certificate Authority (CA) signs the TLS certificates used by all the different JFrog Platform nodes. + +To establish TLS between JFrog Platform nodes: +Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. For more info, Please refer [here](https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates) + +To enable tls in charts, set `tls` to true under `access` in [values.yaml](values.yaml). By default it's false +```yaml +access: + accessConfig: + security: + tls: true +``` + +To add custom tls certificates, create a tls secret from the certificate files. + +```bash +kubectl create secret tls --cert=ca.crt --key=ca.private.key +``` + +For resetting access certificates , you can set `resetAccessCAKeys` to true under access section in [values.yaml](values.yaml) and perform an helm upgrade. +* Note : Once helm upgrade is done, set `resetAccessCAKeys` to false for subsequent upgrades (to avoid resetting access certificates on every helm upgrade) +```yaml +access: + accessConfig: + security: + tls: true + customCertificatesSecretName: + resetAccessCAKeys: true +``` + +### Artifactory filebeat +If you want to collect logs from your Artifactory installation and send them to a central log collection solution like ELK, you can use this option. + +Create a `filebeat.yaml` values file with the following content: +```yaml +filebeat: + enabled: true + logstashUrl: + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "100Mi" + cpu: "100m" +``` + +You can optionally customize the `filebeat.yaml` to send output to a different location like so: +```yaml +filebeat: + enabled: true + filebeatYml: | + +``` + +and use it with you helm install/upgrade: +```bash +helm upgrade --install artifactory -f filebeat.yaml --namespace artifactory center/jfrog/artifactory +``` + +### Install Artifactory with Nginx and Terminate SSL in Nginx Service(LoadBalancer). +To install the helm chart with performing SSL offload in the LoadBalancer layer of Nginx +For Ex: Using AWS ACM certificates to do SSL offload in the loadbalancer layer. 
+In order to do that, simply add the following to an `artifactory-ssl-values.yaml` file: +```yaml + nginx: + https: + enabled: false + service: + ssloffload: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:xx-xxxx:xxxxxxxx:certificate/xxxxxxxxxxxxx" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ssl-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +### Ingress and TLS +To get Helm to create an ingress object with a hostname, add the lines below to an `artifactory-ingress-values.yaml` file: +```yaml + ingress: + enabled: true + hosts: + - artifactory.company.com + artifactory: + service: + type: NodePort + nginx: + enabled: false +``` + +and use it with your helm install/upgrade: +```bash +helm upgrade --install artifactory -f artifactory-ingress-values.yaml --namespace artifactory center/jfrog/artifactory +``` + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```bash +kubectl create secret tls artifactory-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the Artifactory Ingress TLS section of your custom `values.yaml` file: + +```yaml + ingress: + ## If true, Artifactory Ingress will be created + ## + enabled: true + + ## Artifactory Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - artifactory.domain.com + annotations: + kubernetes.io/tls-acme: "true" + ## Artifactory Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: artifactory-tls + hosts: + - artifactory.domain.com +``` + +### Ingress annotations + +This example specifically enables Artifactory to work as a Docker Registry using the Repository Path method. See the [Artifactory as Docker Registry](https://www.jfrog.com/confluence/display/RTF/Getting+Started+with+Artifactory+as+a+Docker+Registry) documentation for more information about this setup. + +```yaml +ingress: + enabled: true + defaultBackend: + enabled: false + hosts: + - myhost.example.com + annotations: + ingress.kubernetes.io/force-ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + ingress.kubernetes.io/proxy-read-timeout: "600" + ingress.kubernetes.io/proxy-send-timeout: "600" + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite ^/(v2)/token /artifactory/api/docker/null/v2/token; + rewrite ^/(v2)/([^\/]*)/(.*) /artifactory/api/docker/$2/$1/$3; + nginx.ingress.kubernetes.io/proxy-body-size: "0" + tls: + - hosts: + - "myhost.example.com" +``` + +If you're using Artifactory as an SSO provider (e.g. with Xray), you will need to add the following annotations, substituting your own domain: +```yaml +.. + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_pass_header Server; + proxy_set_header X-JFrog-Override-Base-Url https://; +``` + +### Ingress additional rules + +You have the option to add additional ingress rules to the Artifactory ingress.
An example for this use case can be routing the /xray path to Xray. +In order to do that, simply add the following to a `artifactory-values.yaml` file: +```yaml +ingress: + enabled: true + + defaultBackend: + enabled: false + + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite "(?i)/xray(/|$)(.*)" /$2 break; + + additionalRules: | + - host: + http: + paths: + - path: / + backend: + serviceName: + servicePort: + - path: /xray + backend: + serviceName: + servicePort: + - path: /artifactory + backend: + serviceName: {{ template "artifactory.nginx.fullname" . }} + servicePort: {{ .Values.nginx.externalPortHttp }} +``` + +and running: +```bash +helm upgrade --install xray center/jfrog/artifactory -f artifactory-values.yaml +``` + +### Dedicated Ingress object for replicator service + +You have the option to add additional ingress object to the Replicator service. An example for this use case can be routing the /replicator/ path to Artifactory. +In order to do that, simply add the following to a `artifactory-values.yaml` file: + +```yaml +artifactory: + replicator: + enabled: true + ingress: + name: + hosts: + - myhost.example.com + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-buffering: "off" + nginx.ingress.kubernetes.io/configuration-snippet: | + chunked_transfer_encoding on; + tls: + - hosts: + - "myhost.example.com" + secretName: +``` + +### Ingress behind another load balancer +If you are running a load balancer, that is used to offload the TLS, in front of Nginx Ingress Controller, or if you are setting **X-Forwarded-*** headers, you might want to enable **'use-forwarded-headers=true'** option. Otherwise nginx will be filling those headers with the request information it receives from the external load balancer. + +To enable it with `helm install` +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx --set-string controller.config.use-forwarded-headers=true +``` +or `helm upgrade` +```bash +helm upgrade nginx-ingress --set-string controller.config.use-forwarded-headers=true center/kubernetes-ingress-nginx/ingress-nginx +``` +or create a values.yaml file with the following content: +```yaml +controller: + config: + use-forwarded-headers: "true" +``` +Then install nginx-ingress with the values file you created: +```bash +helm upgrade --install nginx-ingress --namespace nginx-ingress center/kubernetes-ingress-nginx/ingress-nginx -f values.yaml +``` +This will start sending your Artifactory logs to the log aggregator of your choice, based on your configuration in the `filebeatYml` + +### Log Analytics + +#### FluentD, Prometheus and Grafana + +To configure Prometheus and Grafana to gather metrics from Artifactory through the use of FluentD, please refer to the log analytics repo: + +https://github.com/jfrog/log-analytics-prometheus + +That repo contains a file `artifactory-values.yaml` that can be used to deploy Prometheus, Service Monitor, and Grafana with this chart. 
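+ +As an illustrative sketch (not taken from that repo), once you have downloaded its `artifactory-values.yaml` locally, it can be combined with this chart like any other values file; the command below follows the install conventions used throughout this README: +```bash +# Assumes artifactory-values.yaml was fetched from the log-analytics-prometheus repo above +helm upgrade --install artifactory --namespace artifactory center/jfrog/artifactory -f artifactory-values.yaml +```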
+ + +## Useful links +- https://www.jfrog.com/confluence/display/EP/Getting+Started +- https://www.jfrog.com/confluence/display/RTF/Installing+Artifactory +- https://www.jfrog.com/confluence/ diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ReverseProxyConfiguration.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ReverseProxyConfiguration.md new file mode 100644 index 000000000..38c4a9eaa --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ReverseProxyConfiguration.md @@ -0,0 +1,140 @@ +# JFrog Artifactory Reverse Proxy Settings using Nginx + +#### Reverse Proxy +* To use Artifactory as docker registry it's mandatory to use Reverse Proxy. +* Artifactory provides a Reverse Proxy Configuration Generator screen in which you can fill in a set of fields to generate +the required configuration snippet which you can then download and install directly in the corresponding directory of your reverse proxy server. +* To learn about configuring NGINX or Apache for reverse proxy refer to documentation provided on [JFrog wiki](https://www.jfrog.com/confluence/display/RTF/Configuring+a+Reverse+Proxy) +* By default Artifactory helm chart uses Nginx for reverse proxy and load balancing. + +**Note**: Nginx image distributed with Artifactory helm chart is custom image managed and maintained by JFrog. + +#### Features of Artifactory Nginx +* Provides default configuration with self signed SSL certificate generated on each helm install/upgrade. +* Persist configuration and SSL certificate in `/var/opt/jfrog/nginx` directory + +#### Changing the default Artifactory nginx conf +Use a values.yaml file for changing the value of nginx.mainConf or nginx.artifactoryConf +These configuration will be mounted to the nginx container using a configmap. +For example: +1. Create a values file `nginx-values.yaml` with the following values: +```yaml +nginx: + artifactoryConf: | + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen {{ .Values.nginx.internalPortHttps }} ssl; + listen {{ .Values.nginx.internalPortHttp }} ; + ## Change to you DNS name you use to access Artifactory + server_name ~(?.+)\.{{ include "artifactory.fullname" . }} {{ include "artifactory.fullname" . }}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalPort }}/artifactory/$1; + } + proxy_pass http://{{ include "artifactory.fullname" . 
}}:{{ .Values.artifactory.externalPort }}/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } +``` + +2. Install/upgrade artifactory: +```bash +helm upgrade --install artifactory jfrog/artifactory -f nginx-values.yaml +``` + + +#### Steps to use static configuration for reverse proxy in nginx. +1. Get Artifactory service name using this command `kubectl get svc -n $NAMESPACE` + +2. Create `artifactory.conf` file with nginx configuration. More [nginx configuration examples](https://github.com/jfrog/artifactory-docker-examples/tree/master/files/nginx/conf.d) + + Following is example `artifactory.conf` + + **Note**: + * Create file with name `artifactory.conf` as it's fixed in configMap key. + * Replace `artifactory-artifactory` with service name taken from step 1. + + ```bash + ## add ssl entries when https has been set in config + ssl_certificate /var/opt/jfrog/nginx/ssl/tls.crt; + ssl_certificate_key /var/opt/jfrog/nginx/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers on; + ## server configuration + server { + listen 443 ssl; + listen 80; + ## Change to you DNS name you use to access Artifactory + server_name ~(?.+)\.artifactory-artifactory artifactory-artifactory; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/$ /artifactory/webapp/ redirect; + rewrite ^/artifactory/?(/webapp)?$ /artifactory/webapp/ redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + rewrite ^/(v1|v2)/([^/]+)(.*)$ /artifactory/api/docker/$2/$1/$3; + rewrite ^/(v1|v2)/ /artifactory/api/docker/$1/; + chunked_transfer_encoding on; + client_max_body_size 0; + location /artifactory/ { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass http://artifactory-artifactory:8081/artifactory/$1 break; + } + proxy_pass http://artifactory-artifactory:8081/artifactory/; + proxy_set_header X-Artifactory-Override-Base-Url $http_x_forwarded_proto://$host:$server_port/artifactory; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + } + ``` + +3. Create configMap of `artifactory.conf` created with step above. + ```bash + kubectl create configmap art-nginx-conf --from-file=artifactory.conf + ``` +4. Deploy Artifactory using helm chart. 
+ You can achieve this by providing the name of the configMap created above to `nginx.customArtifactoryConfigMap` in [values.yaml](values.yaml) + + The following command sets the value at runtime: + ```bash + helm install --name artifactory --set nginx.customArtifactoryConfigMap=art-nginx-conf jfrog/artifactory + ``` \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/UPGRADE_NOTES.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/UPGRADE_NOTES.md new file mode 100644 index 000000000..4ba17d0c9 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/UPGRADE_NOTES.md @@ -0,0 +1,39 @@ +# JFrog Artifactory Chart Upgrade Notes +This file describes special upgrade notes needed at specific versions. + +## Upgrade from 8.X to 9.X and above (Chart Versions) + +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* To upgrade from a version prior to 8.x, you first need to upgrade to the latest version of 8.x as described in https://github.com/jfrog/charts/blob/master/stable/artifactory/CHANGELOG.md. +* Note: If you are upgrading from 8.x to 11.x and above chart versions, please delete the existing postgresql statefulset before upgrading the chart, due to breaking changes in the postgresql subchart. +```bash +kubectl delete statefulsets -postgresql +``` + +## Upgrade from 7.X to 8.X (Chart Versions) +**DOWNTIME IS REQUIRED FOR AN UPGRADE!** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you!** +* The PostgreSQL sub chart was upgraded to version `6.5.x`. This version is not backward compatible with the old version (`0.9.5`)! +* Note the following **PostgreSQL** Helm chart changes + * The chart configuration has changed! See [values.yaml](values.yaml) for the new keys used + * **PostgreSQL** is deployed as a StatefulSet + * See [PostgreSQL helm chart](https://hub.helm.sh/charts/stable/postgresql) for all available configurations +* Upgrade + * Due to breaking changes in the **PostgreSQL** Helm chart, a migration of the database is needed from the old to the new database + * The recommended migration process is the [full system export and import](https://www.jfrog.com/confluence/display/RTF/Importing+and+Exporting) + * **NOTE:** To save time, export only metadata and configuration (check `Exclude Content` in the `System Import & Export`) since the Artifactory filestore is persisted + * Upgrade steps: + 1. Block user access to Artifactory (do not shutdown) + 2. Perform `Export System` from the `Admin` -> `Import & Export` -> `System` -> `Export System` + a. Check `Exclude Content` to save export size (as Artifactory filestore will persist across upgrade) + b. Choose to save the export on the persisted Artifactory volume (`/var/opt/jfrog/artifactory/`) + c. Click `Export` (this can take some time) + 3. Run the `helm upgrade` with the new version. The old PostgreSQL will be removed and a new one deployed + a. You must pass an explicit "ready for upgrade" flag with `--set databaseUpgradeReady=yes`. Failing to provide this will block the upgrade! + 4. Once ready, open the Artifactory UI (you might need to re-enter a valid license). Skip all onboarding wizard steps + a. **NOTE:** Don't worry that you can't see the old config and files; it will all be restored with the system import in the next step + 5.
Perform `Import System` from the `Admin` -> `Import & Export` -> `System` -> `Import System` + a. Browse to where the export was saved Artifactory volume (`/var/opt/jfrog/artifactory/`) + b. Click `Import` (this can take some time) + 6. Restore access to Artifactory + * Artifactory should now be ready to get back to normal operation diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/.helmignore b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/Chart.yaml new file mode 100644 index 000000000..2f858e60e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +version: 9.3.4 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/README.md new file mode 100644 index 000000000..319291bc6 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/README.md @@ -0,0 +1,680 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. + +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container 
volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case`postgres` is the admin username). | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. 
The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: 
`256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. |`nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List |`nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slave replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. You can also use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also, if you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This Helm chart also supports customizing the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a configMap to the containers and used to configure the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. `{"sharedBuffers": "500MB"}`. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
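+ +For illustration only (a minimal sketch, not taken from this chart's `values.yaml`; `maxConnections` is a placeholder key that simply follows the camelCase convention described above), a values override using `postgresqlConfiguration` could look like this: + +```yaml +postgresqlConfiguration: + sharedBuffers: "500MB" + maxConnections: "200" +```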
+ +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as a configMap to the containers, adding to or overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the certificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note on TLS and `volumePermissions`: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an ongoing [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding Kubernetes permissions and the use of `securityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using a setup similar to the one described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows you to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies: + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If the persistent volume already contains data, syncing it to the standby nodes will fail for all commits; see [this code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556) for details. If you need to reuse that data, convert it to a SQL dump and import it after `helm install` has finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
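+ +For instance (a minimal sketch; the release name is a placeholder), the policy can be enabled at install time with: + +```console +$ helm install my-release bitnami/postgresql --set networkPolicy.enabled=true +```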
+ +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that the pod runs with `securityContext` and that the permissions of the volume are updated with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false` + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data directory cannot be the mount point directly; it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_ and _[REPLICATION_PASSWORD]_ with the values obtained from the instructions in the installation notes. + +## 9.0.0 + +In this version, the chart was adapted to follow the Helm label best practices; see [PR 3021](https://github.com/bitnami/charts/pull/3021). That means backward compatibility is not guaranteed when upgrading the chart to this major version. + +As a workaround, you can delete the existing statefulset (with the `--cascade=false` flag, pods are not deleted) before upgrading the chart.
For example, this can be a valid workflow: + +- Deploy an old version (8.X.X) +```console +$ helm install postgresql bitnami/postgresql --version 8.10.14 +``` + +- Old version is up and running +```console +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 1 2020-08-04 13:39:54.783480286 +0000 UTC deployed postgresql-8.10.14 11.8.0 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 76s +``` + +- The upgrade to the latest one (9.X.X) is going to fail +```console +$ helm upgrade postgresql bitnami/postgresql +Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden +``` + +- Delete the statefulset +```console +$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql +statefulset.apps "postgresql-postgresql" deleted +``` + +- Now the upgrade works +```console +$ helm upgrade postgresql bitnami/postgresql +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 3 2020-08-04 13:42:08.020385884 +0000 UTC deployed postgresql-9.1.2 11.8.0 +``` + +- Kill the existing pod; the new statefulset will create a new one: +```console +$ kubectl delete pod postgresql-postgresql-0 +pod "postgresql-postgresql-0" deleted + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 19s +``` + +Please note that without the `--cascade=false` flag, both objects (statefulset and pod) would be removed and deployed again by the `helm upgrade` command. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment), also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly; this has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the API deprecation, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with PostgreSQL versions 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
+ +For major releases of PostgreSQL, the internal data storage format is subject to change, which complicates upgrades. You may see errors like the following in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one. + +### 4.0.0 + +This chart uses by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version, make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error: + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with the `postgresql.master.fullname` helper template not obeying `fullnameOverride`. + +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart.
You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`; to do so, connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you will be prompted for a password; this is the password of the previous chart (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local PostgreSQL instance, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. + +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/.helmignore b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/Chart.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 000000000..e6bac7873 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 0.6.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself.
+home: http://www.bitnami.com/ +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +version: 0.6.2 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/README.md new file mode 100644 index 000000000..e04391a3f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/README.md @@ -0,0 +1,274 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +**Names** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +**Images** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +**Labels** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +**Storage** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +**TplValues** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frecuently is the chart context `$` or `.` | + +**Capabilities** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. 
| `.` Chart context | + +**Validations** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | When a chart is using `bitnami/mariadb` as subchart you should use this to validate required password are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "context" $` | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | + +**Warnings** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +**Errors** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +**Utils** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | + +**Secrets** + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret +```yaml +name: + type: string + description: Name of the existing secret. 
+ example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +**Example of use** + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possiblity of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +**NOTES.txt** + +``` +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_capabilities.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c0ea2c70c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_errors.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..d6d3ec65a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_images.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 000000000..aafde9f3b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_labels.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_names.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 000000000..adf2a74f4 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_secrets.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..d6165a294 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = cat $name .defaultNameSuffix -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used istead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_storage.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_tplvalues.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_utils.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..7d02f2ef6 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,26 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_validations.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100644 index 000000000..62635b30e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,219 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $valueKeyArray := splitList "." .valueKey -}} + {{- $value := "" -}} + {{- $latestObj := $.context.Values -}} + {{- range $valueKeyArray -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.valueKey | fail -}} + {{- end -}} + + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} + {{- end -}} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a mariadb required password must not be empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "context" $) }} + +Validate value params: + - secret - String - Required. Name of the secret where mysql values are stored, e.g: "mysql-passwords-secret" +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- if and (not .context.Values.mariadb.existingSecret) .context.Values.mariadb.enabled -}} + {{- $requiredPasswords := list -}} + + {{- if .context.Values.mariadb.secret.requirePasswords -}} + {{- $requiredRootMariadbPassword := dict "valueKey" "mariadb.rootUser.password" "secret" .secretName "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootMariadbPassword -}} + + {{- if not (empty .context.Values.mariadb.db.user) -}} + {{- $requiredMariadbPassword := dict "valueKey" "mariadb.db.password" "secret" .secretName "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredMariadbPassword -}} + {{- end -}} + + {{- if .context.Values.mariadb.replication.enabled -}} + {{- $requiredReplicationPassword := dict "valueKey" "mariadb.replication.password" "secret" .secretName "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a postgresql required password must not be empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . 
-}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) $enabled -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if $enabledReplication -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.enabled | quote -}} + {{- else -}} + true + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- .context.Values.postgresql.replication.enabled | quote -}} + {{- else -}} + {{- .context.Values.replication.enabled | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_warnings.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/values.yaml new file mode 100644 index 000000000..9ecdc93f5 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/commonAnnotations.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/commonAnnotations.yaml new file mode 100644 index 000000000..f6977823c --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@ +commonAnnotations: + helm.sh/hook: 'pre-install, pre-upgrade' + helm.sh/hook-weight: '-1' diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
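For reference, a minimal sketch of what the common.utils helpers above render to, assuming a hypothetical release installed in the default namespace whose secret is named my-release-postgresql and holds a postgresql-password field (names invented for illustration): common.utils.fieldToEnvVar splits the field name on "-" and upper-cases the parts, and common.utils.secret.getvalue prints a ready-to-paste instruction for recovering the current value.

    # hypothetical rendered output: "postgresql-password" becomes POSTGRESQL_PASSWORD via common.utils.fieldToEnvVar
    export POSTGRESQL_PASSWORD=$(kubectl get secret --namespace default my-release-postgresql -o jsonpath="{.data.postgresql-password}" | base64 --decode)

If common.validations.values.single.empty finds such a value unset, it returns a message of the form "'postgresqlPassword' must not be empty, please add '--set postgresqlPassword=$POSTGRESQL_PASSWORD' to the command." followed by the instruction above; the postgresql chart's NOTES.txt later in this change collects those messages through common.validations.values.postgresql.passwords.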
diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 000000000..347d3b40a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/README.md new file mode 100644 index 000000000..1813a2fea --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy your postgresql.conf and/or pg_hba.conf files here to use them as a config map. diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/conf.d/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/conf.d/README.md new file mode 100644 index 000000000..184c1875d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only want to specify certain parameters, you can copy your extended `.conf` files here. +These files will be injected as a config map and will add to/override the default configuration using the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 000000000..cba38091e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy your custom `.sh`, `.sql` or `.sql.gz` files here so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
\ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.lock b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.lock new file mode 100644 index 000000000..72e1642e2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.6.2 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-09-01T17:40:02.795096189Z" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.yaml new file mode 100644 index 000000000..2c28bfe14 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/NOTES.txt b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/NOTES.txt new file mode 100644 index 000000000..596e969ce --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . 
}} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label "{{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster, execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/_helpers.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 000000000..68cd0dc0e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,501 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. +*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. 
+*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificate files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/configmap.yaml new file mode 100644 index 000000000..b29ef6040 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" .
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 000000000..f21a97654 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 000000000..6637867a3 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . 
| indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 000000000..6b7a3171e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 000000000..b993c9971 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 000000000..2a7b372fe --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . 
}}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/podsecuritypolicy.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..da0b3ab11 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 000000000..b0c41b1a4 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/role.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/role.yaml new file mode 100644 index 000000000..6d3cf50a4 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/rolebinding.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 000000000..f7837388d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/secrets.yaml new file mode 100644 index 000000000..c93dbe0bd --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 000000000..17f7ff399 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + name: {{ template "postgresql.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 000000000..d57b7fb48 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 000000000..54d24099f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,345 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . 
| indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 000000000..0e6eefebb --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,514 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "postgresql.tplValue" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . 
}}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "postgresql.tplValue" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . 
}} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-headless.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 000000000..49131578a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-read.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 000000000..885c7bb04 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: slave +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc.yaml new file mode 100644 index 000000000..e9fc50456 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "postgresql.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "postgresql.tplValue" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "postgresql.tplValue" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values-production.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values-production.yaml new file mode 100644 index 000000000..c08014549 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values-production.yaml @@ -0,0 +1,594 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'on' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + 
## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.schema.json b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.schema.json new file mode 100644 index 000000000..7b5e2efc3 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + 
"enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.yaml new file mode 100644 index 000000000..f45c4183d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/charts/postgresql/values.yaml @@ -0,0 +1,600 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r1 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. 
+## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. + ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + extraInitContainers: | + # - name: do-something + # image: busybox + # 
command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. 
+ preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: '' + # + # Certificate filename + certFilename: '' + # + # Certificate Key filename + certKeyFilename: '' + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r188 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/access-tls-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/access-tls-values.yaml new file mode 100644 index 000000000..4dabc956e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/access-tls-values.yaml @@ -0,0 +1,11 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + image: + tag: 12.3.0-debian-10-r71 + postgresqlPassword: password +access: + accessConfig: + security: + tls: true + resetAccessCAKeys: true diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/default-values.yaml new file mode 100644 index 000000000..a43d84d26 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/default-values.yaml @@ -0,0 +1,6 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
+databaseUpgradeReady: true + +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/derby-test-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/derby-test-values.yaml new file mode 100644 index 000000000..cb86aaf54 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/derby-test-values.yaml @@ -0,0 +1,4 @@ +databaseUpgradeReady: true + +postgresql: + enabled: false diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/global-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/global-values.yaml new file mode 100644 index 000000000..8c964a822 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/global-values.yaml @@ -0,0 +1,47 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +global: + versions: + artifactory: 7.11.2 + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + customInitContainers: | + - name: "custom-setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + command: + - 'sh' + - '-c' + - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + volumeMounts: + - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + name: artifactory-volume + # Add custom volumes + customVolumes: | + - name: custom-script + emptyDir: + sizeLimit: 100Mi + # Add custom volumesMounts + customVolumeMounts: | + - name: custom-script + mountPath: "/scripts" + # Add custom sidecar containers + customSidecarContainers: | + - name: "sidecar-list-etc" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + securityContext: + allowPrivilegeEscalation: false + command: ["sh","-c","echo 'Sidecar is running' >> /scripts/sidecar.txt; cat /scripts/sidecar.txt; while true; do sleep 30; done"] + volumeMounts: + - mountPath: "/scripts" + name: custom-script + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/migration-disabled-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/migration-disabled-values.yaml new file mode 100644 index 000000000..f79cbc02f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/migration-disabled-values.yaml @@ -0,0 +1,7 @@ +databaseUpgradeReady: true +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + postgresqlPassword: password +artifactory: + migration: + enabled: false diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/test-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/test-values.yaml new file mode 100644 index 000000000..8adcd943f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/ci/test-values.yaml @@ -0,0 +1,13 @@ +databaseUpgradeReady: true +artifactory: + persistence: + enabled: true + +postgresql: + image: + 
tag: 9.6.18-debian-10-r7 + postgresqlPassword: password + postgresqlExtendedConf: + maxConnections: 102 + persistence: + enabled: true diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/files/migrate.sh b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/files/migrate.sh new file mode 100644 index 000000000..e35bfdbb2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/files/migrate.sh @@ -0,0 +1,4339 @@ +#!/bin/bash + +# Flags +FLAG_Y="y" +FLAG_N="n" +FLAGS_Y_N="$FLAG_Y $FLAG_N" +FLAG_NOT_APPLICABLE="_NA_" + +CURRENT_VERSION=$1 + +WRAPPER_SCRIPT_TYPE_RPMDEB="RPMDEB" +WRAPPER_SCRIPT_TYPE_DOCKER_COMPOSE="DOCKERCOMPOSE" + +SENSITIVE_KEY_VALUE="__sensitive_key_hidden___" + +# Shared system keys +SYS_KEY_SHARED_JFROGURL="shared.jfrogUrl" +SYS_KEY_SHARED_SECURITY_JOINKEY="shared.security.joinKey" +SYS_KEY_SHARED_SECURITY_MASTERKEY="shared.security.masterKey" + +SYS_KEY_SHARED_NODE_ID="shared.node.id" +SYS_KEY_SHARED_JAVAHOME="shared.javaHome" + +SYS_KEY_SHARED_DATABASE_TYPE="shared.database.type" +SYS_KEY_SHARED_DATABASE_TYPE_VALUE_POSTGRES="postgresql" +SYS_KEY_SHARED_DATABASE_DRIVER="shared.database.driver" +SYS_KEY_SHARED_DATABASE_URL="shared.database.url" +SYS_KEY_SHARED_DATABASE_USERNAME="shared.database.username" +SYS_KEY_SHARED_DATABASE_PASSWORD="shared.database.password" + +SYS_KEY_SHARED_ELASTICSEARCH_URL="shared.elasticsearch.url" +SYS_KEY_SHARED_ELASTICSEARCH_USERNAME="shared.elasticsearch.username" +SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD="shared.elasticsearch.password" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP="shared.elasticsearch.clusterSetup" +SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE="shared.elasticsearch.unicastFile" +SYS_KEY_SHARED_ELASTICSEARCH_CLUSTERSETUP_VALUE="YES" + +# Define this in product specific script. Should contain the path to unitcast file +# File used by insight server to write cluster active nodes info. This will be read by elasticsearch +#SYS_KEY_SHARED_ELASTICSEARCH_UNICASTFILE_VALUE="" + +SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME="shared.rabbitMq.active.node.name" +SYS_KEY_RABBITMQ_ACTIVE_NODE_IP="shared.rabbitMq.active.node.ip" + +# Filenames +FILE_NAME_SYSTEM_YAML="system.yaml" +FILE_NAME_JOIN_KEY="join.key" +FILE_NAME_MASTER_KEY="master.key" +FILE_NAME_INSTALLER_YAML="installer.yaml" + +# Global constants used in business logic +NODE_TYPE_STANDALONE="standalone" +NODE_TYPE_CLUSTER_NODE="node" +NODE_TYPE_DATABASE="database" + +# External(isable) databases +DATABASE_POSTGRES="POSTGRES" +DATABASE_ELASTICSEARCH="ELASTICSEARCH" +DATABASE_RABBITMQ="RABBITMQ" + +POSTGRES_LABEL="PostgreSQL" +ELASTICSEARCH_LABEL="Elasticsearch" +RABBITMQ_LABEL="Rabbitmq" + +ARTIFACTORY_LABEL="Artifactory" +JFMC_LABEL="Mission Control" +DISTRIBUTION_LABEL="Distribution" +XRAY_LABEL="Xray" + +POSTGRES_CONTAINER="postgres" +ELASTICSEARCH_CONTAINER="elasticsearch" +RABBITMQ_CONTAINER="rabbitmq" +REDIS_CONTAINER="redis" + +#Adding a small timeout before a read ensures it is positioned correctly in the screen +read_timeout=0.5 + +# Options related to data directory location +PROMPT_DATA_DIR_LOCATION="Installation Directory" +KEY_DATA_DIR_LOCATION="installer.data_dir" + +SYS_KEY_SHARED_NODE_HAENABLED="shared.node.haEnabled" +PROMPT_ADD_TO_CLUSTER="Are you adding an additional node to an existing product cluster?" 
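(Aside: the `SYS_KEY_SHARED_*` constants defined above are dotted paths into the product's `system.yaml`. As a rough, hedged illustration only — every value below is a placeholder, not something this script emits — the database and security keys map onto a nested structure along these lines.)

```bash
# Hypothetical sketch of the nested system.yaml layout the dotted shared.* keys refer to.
# All values are placeholders for illustration.
cat <<'EOF' > /tmp/system.yaml.example
shared:
  jfrogUrl: http://artifactory.example.local:8082
  security:
    joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
  database:
    type: postgresql
    driver: org.postgresql.Driver
    url: jdbc:postgresql://postgres.example.local:5432/artifactory
    username: artifactory
    password: password
EOF
```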
+KEY_ADD_TO_CLUSTER="installer.ha" +VALID_VALUES_ADD_TO_CLUSTER="$FLAGS_Y_N" + +MESSAGE_POSTGRES_INSTALL="The installer can install a $POSTGRES_LABEL database, or you can connect to an existing compatible $POSTGRES_LABEL database\n(compatible databases: https://www.jfrog.com/confluence/display/JFROG/System+Requirements#SystemRequirements-RequirementsMatrix)" +PROMPT_POSTGRES_INSTALL="Do you want to install $POSTGRES_LABEL?" +KEY_POSTGRES_INSTALL="installer.install_postgresql" +VALID_VALUES_POSTGRES_INSTALL="$FLAGS_Y_N" + +# Postgres connection details +RPM_DEB_POSTGRES_HOME_DEFAULT="/var/opt/jfrog/postgres" +RPM_DEB_MESSAGE_STANDALONE_POSTGRES_DATA="$POSTGRES_LABEL home will have data and its configuration" +RPM_DEB_PROMPT_STANDALONE_POSTGRES_DATA="Type desired $POSTGRES_LABEL home location" +RPM_DEB_KEY_STANDALONE_POSTGRES_DATA="installer.postgresql.home" + +MESSAGE_DATABASE_URL="Provide the database connection details" +PROMPT_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://:/artifactory" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://:/mission_control?sslmode=disable" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://:/distribution?sslmode=disable" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://:/xraydb?sslmode=disable" + ;; + esac + if [ -z "$databaseURlExample" ]; then + echo -n "$POSTGRES_LABEL URL" # For consistency with username and password + return + fi + echo -n "$POSTGRES_LABEL url. Example: [$databaseURlExample]" +} +REGEX_DATABASE_URL(){ + local databaseURlExample= + case "$PRODUCT_NAME" in + $ARTIFACTORY_LABEL) + databaseURlExample="jdbc:postgresql://.*/artifactory.*" + ;; + $JFMC_LABEL) + databaseURlExample="postgresql://.*/mission_control.*" + ;; + $DISTRIBUTION_LABEL) + databaseURlExample="jdbc:postgresql://.*/distribution.*" + ;; + $XRAY_LABEL) + databaseURlExample="postgres://.*/xraydb.*" + ;; + esac + echo -n "^$databaseURlExample\$" +} +ERROR_MESSAGE_DATABASE_URL="Invalid $POSTGRES_LABEL URL" +KEY_DATABASE_URL="$SYS_KEY_SHARED_DATABASE_URL" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_USERNAME="$POSTGRES_LABEL username" +KEY_DATABASE_USERNAME="$SYS_KEY_SHARED_DATABASE_USERNAME" +#NOTE: It is important to display the label. Since the message may be hidden if URL is known +PROMPT_DATABASE_PASSWORD="$POSTGRES_LABEL password" +KEY_DATABASE_PASSWORD="$SYS_KEY_SHARED_DATABASE_PASSWORD" +IS_SENSITIVE_DATABASE_PASSWORD="$FLAG_Y" + +MESSAGE_STANDALONE_ELASTICSEARCH_INSTALL="The installer can install a $ELASTICSEARCH_LABEL database or you can connect to an existing compatible $ELASTICSEARCH_LABEL database" +PROMPT_STANDALONE_ELASTICSEARCH_INSTALL="Do you want to install $ELASTICSEARCH_LABEL?" 
+KEY_STANDALONE_ELASTICSEARCH_INSTALL="installer.install_elasticsearch" +VALID_VALUES_STANDALONE_ELASTICSEARCH_INSTALL="$FLAGS_Y_N" + +# Elasticsearch connection details +MESSAGE_ELASTICSEARCH_DETAILS="Provide the $ELASTICSEARCH_LABEL connection details" +PROMPT_ELASTICSEARCH_URL="$ELASTICSEARCH_LABEL URL" +KEY_ELASTICSEARCH_URL="$SYS_KEY_SHARED_ELASTICSEARCH_URL" + +PROMPT_ELASTICSEARCH_USERNAME="$ELASTICSEARCH_LABEL username" +KEY_ELASTICSEARCH_USERNAME="$SYS_KEY_SHARED_ELASTICSEARCH_USERNAME" + +PROMPT_ELASTICSEARCH_PASSWORD="$ELASTICSEARCH_LABEL password" +KEY_ELASTICSEARCH_PASSWORD="$SYS_KEY_SHARED_ELASTICSEARCH_PASSWORD" +IS_SENSITIVE_ELASTICSEARCH_PASSWORD="$FLAG_Y" + +# Cluster related questions +MESSAGE_CLUSTER_MASTER_KEY="Provide the cluster's master key. It can be found in the data directory of the first node under /etc/security/master.key" +PROMPT_CLUSTER_MASTER_KEY="Master Key" +KEY_CLUSTER_MASTER_KEY="$SYS_KEY_SHARED_SECURITY_MASTERKEY" +IS_SENSITIVE_CLUSTER_MASTER_KEY="$FLAG_Y" + +MESSAGE_JOIN_KEY="The Join key is the secret key used to establish trust between services in the JFrog Platform.\n(You can copy the Join Key from Admin > Security > Settings)" +PROMPT_JOIN_KEY="Join Key" +KEY_JOIN_KEY="$SYS_KEY_SHARED_SECURITY_JOINKEY" +IS_SENSITIVE_JOIN_KEY="$FLAG_Y" +REGEX_JOIN_KEY="^[a-zA-Z0-9]{16,}\$" +ERROR_MESSAGE_JOIN_KEY="Invalid Join Key" + +# Rabbitmq related cluster information +MESSAGE_RABBITMQ_ACTIVE_NODE_NAME="Provide an active ${RABBITMQ_LABEL} node name. Run the command [ hostname -s ] on any of the existing nodes in the product cluster to get this" +PROMPT_RABBITMQ_ACTIVE_NODE_NAME="${RABBITMQ_LABEL} active node name" +KEY_RABBITMQ_ACTIVE_NODE_NAME="$SYS_KEY_RABBITMQ_ACTIVE_NODE_NAME" + +# Rabbitmq related cluster information (necessary only for docker-compose) +PROMPT_RABBITMQ_ACTIVE_NODE_IP="${RABBITMQ_LABEL} active node ip" +KEY_RABBITMQ_ACTIVE_NODE_IP="$SYS_KEY_RABBITMQ_ACTIVE_NODE_IP" + +MESSAGE_JFROGURL(){ + echo -e "The JFrog URL allows ${PRODUCT_NAME} to connect to a JFrog Platform Instance.\n(You can copy the JFrog URL from Admin > Security > Settings)" +} +PROMPT_JFROGURL="JFrog URL" +KEY_JFROGURL="$SYS_KEY_SHARED_JFROGURL" +REGEX_JFROGURL="^https?://.*:{0,}[0-9]{0,4}\$" +ERROR_MESSAGE_JFROGURL="Invalid JFrog URL" + + +# Set this to FLAG_Y on upgrade +IS_UPGRADE="${FLAG_N}" + +# This belongs in JFMC but is the ONLY one that needs it so keeping it here for now. Can be made into a method and overridden if necessary +MESSAGE_MULTIPLE_PG_SCHEME="Please setup $POSTGRES_LABEL with schema as described in https://www.jfrog.com/confluence/display/JFROG/Installing+Mission+Control" + +_getMethodOutputOrVariableValue() { + unset EFFECTIVE_MESSAGE + local keyToSearch=$1 + local effectiveMessage= + local result="0" + # logSilly "Searching for method: [$keyToSearch]" + LC_ALL=C type "$keyToSearch" > /dev/null 2>&1 || result="$?" + if [[ "$result" == "0" ]]; then + # logSilly "Found method for [$keyToSearch]" + EFFECTIVE_MESSAGE="$($keyToSearch)" + return + fi + eval EFFECTIVE_MESSAGE=\${$keyToSearch} + if [ ! 
-z "$EFFECTIVE_MESSAGE" ]; then + return + fi + # logSilly "Didn't find method or variable for [$keyToSearch]" +} + + +# REF https://misc.flogisoft.com/bash/tip_colors_and_formatting +cClear="\e[0m" +cBlue="\e[38;5;69m" +cRedDull="\e[1;31m" +cYellow="\e[1;33m" +cRedBright="\e[38;5;197m" +cBold="\e[1m" + + +_loggerGetModeRaw() { + local MODE="$1" + case $MODE in + INFO) + printf "" + ;; + DEBUG) + printf "%s" "[${MODE}] " + ;; + WARN) + printf "${cRedDull}%s%s${cClear}" "[" "${MODE}" "] " + ;; + ERROR) + printf "${cRedBright}%s%s${cClear}" "[" "${MODE}" "] " + ;; + esac +} + + +_loggerGetMode() { + local MODE="$1" + case $MODE in + INFO) + printf "${cBlue}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + DEBUG) + printf "%-7s" "[${MODE}]" + ;; + WARN) + printf "${cRedDull}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + ERROR) + printf "${cRedBright}%s%-5s%s${cClear}" "[" "${MODE}" "]" + ;; + esac +} + +# Capitalises the first letter of the message +_loggerGetMessage() { + local originalMessage="$*" + local firstChar=$(echo "${originalMessage:0:1}" | awk '{ print toupper($0) }') + local resetOfMessage="${originalMessage:1}" + echo "$firstChar$resetOfMessage" +} + +# The spec also says content should be left-trimmed but this is not necessary in our case. We don't reach the limit. +_loggerGetStackTrace() { + printf "%s%-30s%s" "[" "$1:$2" "]" +} + +_loggerGetThread() { + printf "%s" "[main]" +} + +_loggerGetServiceType() { + printf "%s%-5s%s" "[" "shell" "]" +} + +#Trace ID is not applicable to scripts +_loggerGetTraceID() { + printf "%s" "[]" +} + +logRaw() { + echo "" + printf "$1" + echo "" +} + +logBold(){ + echo "" + printf "${cBold}$1${cClear}" + echo "" +} + +# The date binary works differently based on whether it is GNU/BSD +is_date_supported=0 +date --version > /dev/null 2>&1 || is_date_supported=1 +IS_GNU=$(echo $is_date_supported) + +_loggerGetTimestamp() { + if [ "${IS_GNU}" == "0" ]; then + echo -n $(date -u +%FT%T.%3NZ) + else + echo -n $(date -u +%FT%T.000Z) + fi +} + +# https://www.shellscript.sh/tips/spinner/ +_spin() +{ + spinner="/|\\-/|\\-" + while : + do + for i in `seq 0 7` + do + echo -n "${spinner:$i:1}" + echo -en "\010" + sleep 1 + done + done +} + +showSpinner() { + # Start the Spinner: + _spin & + # Make a note of its Process ID (PID): + SPIN_PID=$! + # Kill the spinner on any signal, including our own exit. + trap "kill -9 $SPIN_PID" `seq 0 15` &> /dev/null || return 0 +} + +stopSpinner() { + local occurrences=$(ps -ef | grep -wc "${SPIN_PID}") + let "occurrences+=0" + # validate that it is present (2 since this search itself will show up in the results) + if [ $occurrences -gt 1 ]; then + kill -9 $SPIN_PID &>/dev/null || return 0 + wait $SPIN_ID &>/dev/null + fi +} + +_getEffectiveMessage(){ + local MESSAGE="$1" + local MODE=${2-"INFO"} + + if [ -z "$CONTEXT" ]; then + CONTEXT=$(caller) + fi + + _EFFECTIVE_MESSAGE= + if [ -z "$LOG_BEHAVIOR_ADD_META" ]; then + _EFFECTIVE_MESSAGE="$(_loggerGetModeRaw $MODE)$(_loggerGetMessage $MESSAGE)" + else + local SERVICE_TYPE="script" + local TRACE_ID="" + local THREAD="main" + + local CONTEXT_LINE=$(echo "$CONTEXT" | awk '{print $1}') + local CONTEXT_FILE=$(echo "$CONTEXT" | awk -F"/" '{print $NF}') + + _EFFECTIVE_MESSAGE="$(_loggerGetTimestamp) $(_loggerGetServiceType) $(_loggerGetMode $MODE) $(_loggerGetTraceID) $(_loggerGetStackTrace $CONTEXT_FILE $CONTEXT_LINE) $(_loggerGetThread) - $(_loggerGetMessage $MESSAGE)" + fi + CONTEXT= +} + +# Important - don't call any log method from this method. Will become an infinite loop. 
Use echo to debug +_logToFile() { + local MODE=${1-"INFO"} + local targetFile="$LOG_BEHAVIOR_ADD_REDIRECTION" + # IF the file isn't passed, abort + if [ -z "$targetFile" ]; then + return + fi + # IF this is not being run in verbose mode and mode is debug or lower, abort + if [ "${VERBOSE_MODE}" != "$FLAG_Y" ] && [ "${VERBOSE_MODE}" != "true" ] && [ "${VERBOSE_MODE}" != "debug" ]; then + if [ "$MODE" == "DEBUG" ] || [ "$MODE" == "SILLY" ]; then + return + fi + fi + + # Create the file if it doesn't exist + if [ ! -f "${targetFile}" ]; then + return + # touch $targetFile > /dev/null 2>&1 || true + fi + # # Make it readable + # chmod 640 $targetFile > /dev/null 2>&1 || true + + # Log contents + printf "%s\n" "$_EFFECTIVE_MESSAGE" >> "$targetFile" || true +} + +logger() { + if [ "$LOG_BEHAVIOR_ADD_NEW_LINE" == "$FLAG_Y" ]; then + echo "" + fi + _getEffectiveMessage "$@" + local MODE=${2-"INFO"} + printf "%s\n" "$_EFFECTIVE_MESSAGE" + _logToFile "$MODE" +} + +logDebug(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "$FLAG_Y" ] || [ "${VERBOSE_MODE}" == "true" ] || [ "${VERBOSE_MODE}" == "debug" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logSilly(){ + VERBOSE_MODE=${VERBOSE_MODE-"false"} + CONTEXT=$(caller) + if [ "${VERBOSE_MODE}" == "silly" ];then + logger "$1" "DEBUG" + else + logger "$1" "DEBUG" >&6 + fi + CONTEXT= +} + +logError() { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= +} + +errorExit () { + CONTEXT=$(caller) + logger "$1" "ERROR" + CONTEXT= + exit 1 +} + +warn () { + CONTEXT=$(caller) + logger "$1" "WARN" + CONTEXT= +} + +note () { + CONTEXT=$(caller) + logger "$1" "NOTE" + CONTEXT= +} + +bannerStart() { + title=$1 + echo + echo -e "\033[1m${title}\033[0m" + echo +} + +bannerSection() { + title=$1 + echo + echo -e "******************************** ${title} ********************************" + echo +} + +bannerSubSection() { + title=$1 + echo + echo -e "************** ${title} *******************" + echo +} + +bannerMessge() { + title=$1 + echo + echo -e "********************************" + echo -e "${title}" + echo -e "********************************" + echo +} + +setRed () { + local input="$1" + echo -e \\033[31m${input}\\033[0m +} +setGreen () { + local input="$1" + echo -e \\033[32m${input}\\033[0m +} +setYellow () { + local input="$1" + echo -e \\033[33m${input}\\033[0m +} + +logger_addLinebreak () { + echo -e "---\n" +} + +bannerImportant() { + title=$1 + local bold="\033[1m" + local noColour="\033[0m" + echo + echo -e "${bold}######################################## IMPORTANT ########################################${noColour}" + echo -e "${bold}${title}${noColour}" + echo -e "${bold}###########################################################################################${noColour}" + echo +} + +bannerEnd() { + #TODO pass a title and calculate length dynamically so that start and end look alike + echo + echo "*****************************************************************************" + echo +} + +banner() { + title=$1 + content=$2 + bannerStart "${title}" + echo -e "$content" +} + +# The logic below helps us redirect content we'd normally hide to the log file. + # + # We have several commands which clutter the console with output and so use + # `cmd > /dev/null` - this redirects the command's output to null. + # + # However, the information we just hid maybe useful for support. 
Using the code pattern + # `cmd >&6` (instead of `cmd> >/dev/null` ), the command's output is hidden from the console + # but redirected to the installation log file + # + +#Default value of 6 is just null +exec 6>>/dev/null +redirectLogsToFile() { + echo "" + # local file=$1 + + # [ ! -z "${file}" ] || return 0 + + # local logDir=$(dirname "$file") + + # if [ ! -f "${file}" ]; then + # [ -d "${logDir}" ] || mkdir -p ${logDir} || \ + # ( echo "WARNING : Could not create parent directory (${logDir}) to redirect console log : ${file}" ; return 0 ) + # fi + + # #6 now points to the log file + # exec 6>>${file} + # #reference https://unix.stackexchange.com/questions/145651/using-exec-and-tee-to-redirect-logs-to-stdout-and-a-log-file-in-the-same-time + # exec 2>&1 > >(tee -a "${file}") +} + +# Check if a give key contains any sensitive string as part of it +# Based on the result, the caller can decide its value can be displayed or not +# Sample usage : isKeySensitive "${key}" && displayValue="******" || displayValue=${value} +isKeySensitive(){ + local key=$1 + local sensitiveKeys="password|secret|key|token" + + if [ -z "${key}" ]; then + return 1 + else + local lowercaseKey=$(echo "${key}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + [[ "${lowercaseKey}" =~ ${sensitiveKeys} ]] && return 0 || return 1 + fi +} + +getPrintableValueOfKey(){ + local displayValue= + local key="$1" + if [ -z "$key" ]; then + # This is actually an incorrect usage of this method but any logging will cause unexpected content in the caller + echo -n "" + return + fi + + local value="$2" + isKeySensitive "${key}" && displayValue="$SENSITIVE_KEY_VALUE" || displayValue="${value}" + echo -n $displayValue +} + +_createConsoleLog(){ + if [ -z "${JF_PRODUCT_HOME}" ]; then + return + fi + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + mkdir -p "${JF_PRODUCT_HOME}/var/log" || true + if [ ! -f ${targetFile} ]; then + touch $targetFile > /dev/null 2>&1 || true + fi + chmod 640 $targetFile > /dev/null 2>&1 || true +} + +# Output from application's logs are piped to this method. It checks a configuration variable to determine if content should be logged to +# the common console.log file +redirectServiceLogsToFile() { + + local result="0" + # check if the function getSystemValue exists + LC_ALL=C type getSystemValue > /dev/null 2>&1 || result="$?" + if [[ "$result" != "0" ]]; then + warn "Couldn't find the systemYamlHelper. Skipping log redirection" + return 0 + fi + + getSystemValue "shared.consoleLog" "NOT_SET" + if [[ "${YAML_VALUE}" == "false" ]]; then + logger "Redirection is set to false. Skipping log redirection" + return 0; + fi + + if [ -z "${JF_PRODUCT_HOME}" ] || [ "${JF_PRODUCT_HOME}" == "" ]; then + warn "JF_PRODUCT_HOME is unavailable. 
Skipping log redirection" + return 0 + fi + + local targetFile="${JF_PRODUCT_HOME}/var/log/console.log" + + _createConsoleLog + + while read -r line; do + printf '%s\n' "${line}" >> $targetFile || return 0 # Don't want to log anything - might clutter the screen + done +} + +## Display environment variables starting with JF_ along with its value +## Value of sensitive keys will be displayed as "******" +## +## Sample Display : +## +## ======================== +## JF Environment variables +## ======================== +## +## JF_SHARED_NODE_ID : locahost +## JF_SHARED_JOINKEY : ****** +## +## +displayEnv() { + local JFEnv=$(printenv | grep ^JF_ 2>/dev/null) + local key= + local value= + + if [ -z "${JFEnv}" ]; then + return + fi + + cat << ENV_START_MESSAGE + +======================== +JF Environment variables +======================== +ENV_START_MESSAGE + + for entry in ${JFEnv}; do + key=$(echo "${entry}" | awk -F'=' '{print $1}') + value=$(echo "${entry}" | awk -F'=' '{print $2}') + + isKeySensitive "${key}" && value="******" || value=${value} + + printf "\n%-35s%s" "${key}" " : ${value}" + done + echo; +} + +_addLogRotateConfiguration() { + logDebug "Method ${FUNCNAME[0]}" + # mandatory inputs + local confFile="$1" + local logFile="$2" + + # Method available in _ioOperations.sh + LC_ALL=C type io_setYQPath > /dev/null 2>&1 || return 1 + + io_setYQPath + + # Method available in _systemYamlHelper.sh + LC_ALL=C type getSystemValue > /dev/null 2>&1 || return 1 + + local frequency="daily" + local archiveFolder="archived" + + local compressLogFiles= + getSystemValue "shared.logging.rotation.compress" "true" + if [[ "${YAML_VALUE}" == "true" ]]; then + compressLogFiles="compress" + fi + + getSystemValue "shared.logging.rotation.maxFiles" "10" + local noOfBackupFiles="${YAML_VALUE}" + + getSystemValue "shared.logging.rotation.maxSizeMb" "25" + local sizeOfFile="${YAML_VALUE}M" + + logDebug "Adding logrotate configuration for [$logFile] to [$confFile]" + + # Add configuration to file + local confContent=$(cat << LOGROTATECONF +$logFile { + $frequency + missingok + rotate $noOfBackupFiles + $compressLogFiles + notifempty + olddir $archiveFolder + dateext + extension .log + dateformat -%Y-%m-%d + size ${sizeOfFile} +} +LOGROTATECONF +) + echo "${confContent}" > ${confFile} || return 1 +} + +_operationIsBySameUser() { + local targetUser="$1" + local currentUserID=$(id -u) + local currentUserName=$(id -un) + + if [ $currentUserID == $targetUser ] || [ $currentUserName == $targetUser ]; then + echo -n "yes" + else + echo -n "no" + fi +} + +_addCronJobForLogrotate() { + logDebug "Method ${FUNCNAME[0]}" + + # Abort if logrotate is not available + [ "$(io_commandExists 'crontab')" != "yes" ] && warn "cron is not available" && return 1 + + # mandatory inputs + local productHome="$1" + local confFile="$2" + local cronJobOwner="$3" + + # We want to use our binary if possible. It may be more recent than the one in the OS + local logrotateBinary="$productHome/app/third-party/logrotate/logrotate" + + if [ ! -f "$logrotateBinary" ]; then + logrotateBinary="logrotate" + [ "$(io_commandExists 'logrotate')" != "yes" ] && warn "logrotate is not available" && return 1 + fi + local cmd="$logrotateBinary ${confFile} --state $productHome/var/etc/logrotate/logrotate-state" #--verbose + + id -u $cronJobOwner > /dev/null 2>&1 || { warn "User $cronJobOwner does not exist. 
Aborting logrotate configuration" && return 1; } + + # Remove the existing line + removeLogRotation "$productHome" "$cronJobOwner" || true + + # Run logrotate daily at 23:55 hours + local cronInterval="55 23 * * * $cmd" + + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + # If this is standalone mode, we cannot use -u - the user running this process may not have the necessary privileges + if [ "$standaloneMode" == "no" ]; then + (crontab -l -u $cronJobOwner 2>/dev/null; echo "$cronInterval") | crontab -u $cronJobOwner - + else + (crontab -l 2>/dev/null; echo "$cronInterval") | crontab - + fi +} + +## Configure logrotate for a product +## Failure conditions: +## If logrotation could not be setup for some reason +## Parameters: +## $1: The product name +## $2: The product home +## Depends on global: none +## Updates global: none +## Returns: NA + +configureLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + + # mandatory inputs + local productName="$1" + if [ -z $productName ]; then + warn "Incorrect usage. A product name is necessary for configuring log rotation" && return 1 + fi + + local productHome="$2" + if [ -z $productHome ]; then + warn "Incorrect usage. A product home folder is necessary for configuring log rotation" && return 1 + fi + + local logFile="${productHome}/var/log/console.log" + if [[ $(uname) == "Darwin" ]]; then + logger "Log rotation for [$logFile] has not been configured. Please setup manually" + return 0 + fi + + local userID="$3" + if [ -z $userID ]; then + warn "Incorrect usage. A userID is necessary for configuring log rotation" && return 1 + fi + + local groupID=${4:-$userID} + local logConfigOwner=${5:-$userID} + + logDebug "Configuring log rotation as user [$userID], group [$groupID], effective cron User [$logConfigOwner]" + + local errorMessage="Could not configure logrotate. Please configure log rotation of the file: [$logFile] manually" + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + # TODO move to recursive method + createDir "${productHome}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/log/archived" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + + # TODO move to recursive method + createDir "${productHome}/var/etc" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + createDir "${productHome}/var/etc/logrotate" "$logConfigOwner" || { warn "${errorMessage}" && return 1; } + + # conf file should be owned by the user running the script + createFile "${confFile}" "${logConfigOwner}" || { warn "Could not create configuration file [$confFile]" return 1; } + + _addLogRotateConfiguration "${confFile}" "${logFile}" "$userID" "$groupID" || { warn "${errorMessage}" && return 1; } + _addCronJobForLogrotate "${productHome}" "${confFile}" "${logConfigOwner}" || { warn "${errorMessage}" && return 1; } +} + +_pauseExecution() { + if [ "${VERBOSE_MODE}" == "debug" ]; then + + local breakPoint="$1" + if [ ! 
-z "$breakPoint" ]; then + printf "${cBlue}Breakpoint${cClear} [$breakPoint] " + echo "" + fi + printf "${cBlue}Press enter once you are ready to continue${cClear}" + read -s choice + echo "" + fi +} + +# removeLogRotation "$productHome" "$cronJobOwner" || true +removeLogRotation() { + logDebug "Method ${FUNCNAME[0]}" + if [[ $(uname) == "Darwin" ]]; then + logDebug "Not implemented for Darwin." + return 0 + fi + local productHome="$1" + local cronJobOwner="$2" + local standaloneMode=$(_operationIsBySameUser "$cronJobOwner") + + local confFile="${productHome}/var/etc/logrotate/logrotate.conf" + + if [ "$standaloneMode" == "no" ]; then + crontab -l -u $cronJobOwner 2>/dev/null | grep -v "$confFile" | crontab -u $cronJobOwner - + else + crontab -l 2>/dev/null | grep -v "$confFile" | crontab - + fi +} + +# NOTE: This method does not check the configuration to see if redirection is necessary. +# This is intentional. If we don't redirect, tomcat logs might get redirected to a folder/file +# that does not exist, causing the service itself to not start +setupTomcatRedirection() { + logDebug "Method ${FUNCNAME[0]}" + local consoleLog="${JF_PRODUCT_HOME}/var/log/console.log" + _createConsoleLog + export CATALINA_OUT="${consoleLog}" +} + +setupScriptLogsRedirection() { + logDebug "Method ${FUNCNAME[0]}" + if [ -z "${JF_PRODUCT_HOME}" ]; then + logDebug "No JF_PRODUCT_HOME. Returning" + return + fi + # Create the console.log file if it is not already present + # _createConsoleLog || true + # # Ensure any logs (logger/logError/warn) also get redirected to the console.log + # # Using installer.log as a temparory fix. Please change this to console.log once INST-291 is fixed + export LOG_BEHAVIOR_ADD_REDIRECTION="${JF_PRODUCT_HOME}/var/log/console.log" + export LOG_BEHAVIOR_ADD_META="$FLAG_Y" +} + +# Returns Y if this method is run inside a container +isRunningInsideAContainer() { + if [ -f "/.dockerenv" ]; then + echo -n "$FLAG_Y" + else + echo -n "$FLAG_N" + fi +} + +POSTGRES_USER=999 +NGINX_USER=104 +NGINX_GROUP=107 +ES_USER=1000 +REDIS_USER=999 +MONGO_USER=999 +RABBITMQ_USER=999 +LOG_FILE_PERMISSION=640 +PID_FILE_PERMISSION=644 + +# Copy file +copyFile(){ + local source=$1 + local target=$2 + local mode=${3:-overwrite} + local enableVerbose=${4:-"${FLAG_N}"} + local verboseFlag="" + + if [ ! -z "${enableVerbose}" ] && [ "${enableVerbose}" == "${FLAG_Y}" ]; then + verboseFlag="-v" + fi + + if [[ ! 
( $source && $target ) ]]; then + warn "Source and target is mandatory to copy file" + return 1 + fi + + if [[ -f "${target}" ]]; then + [[ "$mode" = "overwrite" ]] && ( cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}") || true + else + cp ${verboseFlag} -f "$source" "$target" || errorExit "Unable to copy file, command : cp -f ${source} ${target}" + fi +} + +# Copy files recursively from given source directory to destination directory +# This method wil copy but will NOT overwrite +# Destination will be created if its not available +copyFilesNoOverwrite(){ + local src=$1 + local dest=$2 + local enableVerboseCopy="${3:-${FLAG_Y}}" + + if [[ -z "${src}" || -z "${dest}" ]]; then + return + fi + + if [ -d "${src}" ] && [ "$(ls -A ${src})" ]; then + local relativeFilePath="" + local targetFilePath="" + + for file in $(find ${src} -type f 2>/dev/null) ; do + # Derive relative path and attach it to destination + # Example : + # src=/extra_config + # dest=/var/opt/jfrog/artifactory/etc + # file=/extra_config/config.xml + # relativeFilePath=config.xml + # targetFilePath=/var/opt/jfrog/artifactory/etc/config.xml + relativeFilePath=${file/${src}/} + targetFilePath=${dest}${relativeFilePath} + + createDir "$(dirname "$targetFilePath")" + copyFile "${file}" "${targetFilePath}" "no_overwrite" "${enableVerboseCopy}" + done + fi +} + +# TODO : WINDOWS ? +# Check the max open files and open processes set on the system +checkULimits () { + local minMaxOpenFiles=${1:-32000} + local minMaxOpenProcesses=${2:-1024} + local setValue=${3:-true} + local warningMsgForFiles=${4} + local warningMsgForProcesses=${5} + + logger "Checking open files and processes limits" + + local currentMaxOpenFiles=$(ulimit -n) + logger "Current max open files is $currentMaxOpenFiles" + if [ ${currentMaxOpenFiles} != "unlimited" ] && [ "$currentMaxOpenFiles" -lt "$minMaxOpenFiles" ]; then + if [ "${setValue}" ]; then + ulimit -n "${minMaxOpenFiles}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForFiles}" ] || warn "${warningMsgForFiles}" + else + errorExit "Max number of open files $currentMaxOpenFiles, is too low. Cannot run the application!" + fi + fi + + local currentMaxOpenProcesses=$(ulimit -u) + logger "Current max open processes is $currentMaxOpenProcesses" + if [ "$currentMaxOpenProcesses" != "unlimited" ] && [ "$currentMaxOpenProcesses" -lt "$minMaxOpenProcesses" ]; then + if [ "${setValue}" ]; then + ulimit -u "${minMaxOpenProcesses}" >/dev/null 2>&1 || warn "Max number of open files $currentMaxOpenFiles is low!" + [ -z "${warningMsgForProcesses}" ] || warn "${warningMsgForProcesses}" + else + errorExit "Max number of open files $currentMaxOpenProcesses, is too low. Cannot run the application!" + fi + fi +} + +createDirs() { + local appDataDir=$1 + local serviceName=$2 + local folders="backup bootstrap data etc logs work" + + [ -z "${appDataDir}" ] && errorExit "An application directory is mandatory to create its data structure" || true + [ -z "${serviceName}" ] && errorExit "A service name is mandatory to create service data structure" || true + + for folder in ${folders} + do + folder=${appDataDir}/${folder}/${serviceName} + if [ ! 
-d "${folder}" ]; then + logger "Creating folder : ${folder}" + mkdir -p "${folder}" || errorExit "Failed to create ${folder}" + fi + done +} + + +testReadWritePermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local test_file=${dir_to_check}/test-permissions + + # Write file + if echo test > ${test_file} 1> /dev/null 2>&1; then + # Write succeeded. Testing read... + if cat ${test_file} > /dev/null; then + rm -f ${test_file} + else + error=true + fi + else + error=true + fi + + if [ ${error} == true ]; then + return 1 + else + return 0 + fi +} + +# Test directory has read/write permissions for current user +testDirectoryPermissions () { + local dir_to_check=$1 + local error=false + + [ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory" + + local u_id=$(id -u) + local id_str="id ${u_id}" + + logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}" + + if ! testReadWritePermissions ${dir_to_check}; then + error=true + fi + + if [ "${error}" == true ]; then + local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check}) + logger "###########################################################" + logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}" + logger "${stat_data}" + logger "Mounted directory must have read/write permissions for user ${id_str}" + logger "###########################################################" + errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}" + fi + logger "Permissions for ${dir_to_check} are good" +} + +# Utility method to create a directory path recursively with chown feature as +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: Root directory from where the path can be created +## $2: List of recursive child directories seperated by space +## $3: user who should own the directory. Optional +## $4: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA +# +# Usage: +# createRecursiveDir "/opt/jfrog/product/var" "bootstrap tomcat lib" "user_name" "group_name" +createRecursiveDir(){ + local rootDir=$1 + local pathDirs=$2 + local user=$3 + local group=${4:-${user}} + local fullPath= + + [ ! -z "${rootDir}" ] || return 0 + + createDir "${rootDir}" "${user}" "${group}" + + [ ! -z "${pathDirs}" ] || return 0 + + fullPath=${rootDir} + + for dir in ${pathDirs}; do + fullPath=${fullPath}/${dir} + createDir "${fullPath}" "${user}" "${group}" + done +} + +# Utility method to create a directory +# Failure conditions: +## Exits if unable to create a directory +# Parameters: +## $1: directory to create +## $2: user who should own the directory. Optional +## $3: group who should own the directory. Optional +# Depends on global: none +# Updates global: none +# Returns: NA + +createDir(){ + local dirName="$1" + local printMessage=no + logSilly "Method ${FUNCNAME[0]} invoked with [$dirName]" + [ -z "${dirName}" ] && return + + logDebug "Attempting to create ${dirName}" + mkdir -p "${dirName}" || errorExit "Unable to create directory: [${dirName}]" + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + # Earlier, this line would have returned 1 if it failed. Now it just warns. + # This is intentional. 
Earlier, this line would NOT be reached if the folder already existed. + # Since it will always come to this line and the script may be running as a non-root user, this method will just warn if + # setting permissions fails (so as to not affect any existing flows) + io_setOwnershipNonRecursive "$dirName" "$userID" "$groupID" || warn "Could not set owner of [$dirName] to [$userID:$groupID]" + fi + # logging message to print created dir with user and group + local logMessage=${4:-$printMessage} + if [[ "${logMessage}" == "yes" ]]; then + logger "Successfully created directory [${dirName}]. Owner: [${userID}:${groupID}]" + fi +} + +removeSoftLinkAndCreateDir () { + local dirName="$1" + local userID="$2" + local groupID="$3" + local logMessage="$4" + removeSoftLink "${dirName}" + createDir "${dirName}" "${userID}" "${groupID}" "${logMessage}" +} + +# Utility method to remove a soft link +removeSoftLink () { + local dirName="$1" + if [[ -L "${dirName}" ]]; then + targetLink=$(readlink -f "${dirName}") + logger "Removing the symlink [${dirName}] pointing to [${targetLink}]" + rm -f "${dirName}" + fi +} + +# Check Directory exist in the path +checkDirExists () { + local directoryPath="$1" + + [[ -d "${directoryPath}" ]] && echo -n "true" || echo -n "false" +} + + +# Utility method to create a file +# Failure conditions: +# Parameters: +## $1: file to create +# Depends on global: none +# Updates global: none +# Returns: NA + +createFile(){ + local fileName="$1" + logSilly "Method ${FUNCNAME[0]} [$fileName]" + [ -f "${fileName}" ] && return 0 + touch "${fileName}" || return 1 + + local userID="$2" + local groupID=${3:-$userID} + + # If UID/GID is passed, chown the folder + if [ ! -z "$userID" ] && [ ! -z "$groupID" ]; then + io_setOwnership "$fileName" "$userID" "$groupID" || return 1 + fi +} + +# Check File exist in the filePath +# IMPORTANT- DON'T ADD LOGGING to this method +checkFileExists () { + local filePath="$1" + + [[ -f "${filePath}" ]] && echo -n "true" || echo -n "false" +} + +# Check for directories contains any (files or sub directories) +# IMPORTANT- DON'T ADD LOGGING to this method +checkDirContents () { + local directoryPath="$1" + if [[ "$(ls -1 "${directoryPath}" | wc -l)" -gt 0 ]]; then + echo -n "true" + else + echo -n "false" + fi +} + +# Check contents exist in directory +# IMPORTANT- DON'T ADD LOGGING to this method +checkContentExists () { + local source="$1" + + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + echo -n "false" + else + echo -n "true" + fi +} + +# Resolve the variable +# IMPORTANT- DON'T ADD LOGGING to this method +evalVariable () { + local output="$1" + local input="$2" + + eval "${output}"=\${"${input}"} + eval echo \${"${output}"} +} + +# Usage: if [ "$(io_commandExists 'curl')" == "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_commandExists() { + local commandToExecute="$1" + hash "${commandToExecute}" 2>/dev/null + local rt=$? 
+ if [ "$rt" == 0 ]; then echo -n "yes"; else echo -n "no"; fi +} + +# Usage: if [ "$(io_curlExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_curlExists() { + io_commandExists "curl" +} + + +io_hasMatch() { + logSilly "Method ${FUNCNAME[0]}" + local result=0 + logDebug "Executing [echo \"$1\" | grep \"$2\" >/dev/null 2>&1]" + echo "$1" | grep "$2" >/dev/null 2>&1 || result=1 + return $result +} + +# Utility method to check if the string passed (usually a connection url) corresponds to this machine itself +# Failure conditions: None +# Parameters: +## $1: string to check against +# Depends on global: none +# Updates global: IS_LOCALHOST with value "yes/no" +# Returns: NA + +io_getIsLocalhost() { + logSilly "Method ${FUNCNAME[0]}" + IS_LOCALHOST="$FLAG_N" + local inputString="$1" + logDebug "Parsing [$inputString] to check if we are dealing with this machine itself" + + io_hasMatch "$inputString" "localhost" && { + logDebug "Found localhost. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for localhost" + + local hostIP=$(io_getPublicHostIP) + io_hasMatch "$inputString" "$hostIP" && { + logDebug "Found $hostIP. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostIP" + + local hostID=$(io_getPublicHostID) + io_hasMatch "$inputString" "$hostID" && { + logDebug "Found $hostID. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostID" + + local hostName=$(io_getPublicHostName) + io_hasMatch "$inputString" "$hostName" && { + logDebug "Found $hostName. Returning [$FLAG_Y]" + IS_LOCALHOST="$FLAG_Y" && return; + } || logDebug "Did not find match for $hostName" + +} + +# Usage: if [ "$(io_tarExists)" != "yes" ] +# IMPORTANT- DON'T ADD LOGGING to this method +io_tarExists() { + io_commandExists "tar" +} + +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostIP() { + local OS_TYPE=$(uname) + local publicHostIP= + if [ "${OS_TYPE}" == "Darwin" ]; then + ipStatus=$(ifconfig en0 | grep "status" | awk '{print$2}') + if [ "${ipStatus}" == "active" ]; then + publicHostIP=$(ifconfig en0 | grep inet | grep -v inet6 | awk '{print $2}') + else + errorExit "Host IP could not be resolved!" + fi + elif [ "${OS_TYPE}" == "Linux" ]; then + publicHostIP=$(hostname -i 2>/dev/null || echo "127.0.0.1") + fi + publicHostIP=$(echo "${publicHostIP}" | awk '{print $1}') + echo -n "${publicHostIP}" +} + +# Will return the short host name (up to the first dot) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostName() { + echo -n "$(hostname -s)" +} + +# Will return the full host name (use this as much as possible) +# IMPORTANT- DON'T ADD LOGGING to this method +io_getPublicHostID() { + echo -n "$(hostname)" +} + +# Utility method to backup a file +# Failure conditions: NA +# Parameters: filePath +# Depends on global: none, +# Updates global: none +# Returns: NA +io_backupFile() { + logSilly "Method ${FUNCNAME[0]}" + fileName="$1" + if [ ! 
-f "${filePath}" ]; then + logDebug "No file: [${filePath}] to backup" + return + fi + dateTime=$(date +"%Y-%m-%d-%H-%M-%S") + targetFileName="${fileName}.backup.${dateTime}" + yes | \cp -f "$fileName" "${targetFileName}" + logger "File [${fileName}] backedup as [${targetFileName}]" +} + +# Reference https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash/4025065#4025065 +is_number() { + case "$BASH_VERSION" in + 3.1.*) + PATTERN='\^\[0-9\]+\$' + ;; + *) + PATTERN='^[0-9]+$' + ;; + esac + + [[ "$1" =~ $PATTERN ]] +} + +io_compareVersions() { + if [[ $# != 2 ]] + then + echo "Usage: min_version current minimum" + return + fi + + A="${1%%.*}" + B="${2%%.*}" + + if [[ "$A" != "$1" && "$B" != "$2" && "$A" == "$B" ]] + then + io_compareVersions "${1#*.}" "${2#*.}" + else + if is_number "$A" && is_number "$B" + then + if [[ "$A" -eq "$B" ]]; then + echo "0" + elif [[ "$A" -gt "$B" ]]; then + echo "1" + elif [[ "$A" -lt "$B" ]]; then + echo "-1" + fi + fi + fi +} + +# Reference https://stackoverflow.com/questions/369758/how-to-trim-whitespace-from-a-bash-variable +# Strip all leading and trailing spaces +# IMPORTANT- DON'T ADD LOGGING to this method +io_trim() { + local var="$1" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +# temporary function will be removing it ASAP +# search for string and replace text in file +replaceText_migration_hook () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s/${regexString}/${replaceText}/" "${file}" || warn "Failed to replace the text in ${file}" + fi +} + +# search for string and replace text in file +replaceText () { + local regexString="$1" + local replaceText="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + else + sed -i -e "s#${regexString}#${replaceText}#" "${file}" || warn "Failed to replace the text in ${file}" + logDebug "Replaced [$regexString] with [$replaceText] in [$file]" + fi +} + +# search for string and prepend text in file +prependText () { + local regexString="$1" + local text="$2" + local file="$3" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + else + sed -i -e '/'"${regexString}"'/i\'$'\n\\'"${text}"''$'\n' "${file}" || warn "Failed to prepend the text in ${file}" + fi +} + +# add text to beginning of the file +addText () { + local text="$1" + local file="$2" + + if [[ "$(checkFileExists "${file}")" != "true" ]]; then + return + fi + if [[ $(uname) == "Darwin" ]]; then + sed -i '' -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + else + sed -i -e '1s/^/'"${text}"'\'$'\n/' "${file}" || warn "Failed to add the text in ${file}" + fi +} + +io_replaceString () { + local value="$1" + local firstString="$2" + local secondString="$3" + local separator=${4:-"/"} + local updateValue= + if [[ 
$(uname) == "Darwin" ]]; then + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + else + updateValue=$(echo "${value}" | sed "s${separator}${firstString}${separator}${secondString}${separator}") + fi + echo -n "${updateValue}" +} + +_findYQ() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + local parentDir="$1" + if [ -z "$parentDir" ]; then + return + fi + logDebug "Executing command [find "${parentDir}" -name third-party -type d]" + local yq=$(find "${parentDir}" -name third-party -type d) + if [ -d "${yq}/yq" ]; then + export YQ_PATH="${yq}/yq" + fi +} + + +io_setYQPath() { + # logSilly "Method ${FUNCNAME[0]}" (Intentionally not logging. Does not add value) + if [ "$(io_commandExists 'yq')" == "yes" ]; then + return + fi + + if [ ! -z "${JF_PRODUCT_HOME}" ] && [ -d "${JF_PRODUCT_HOME}" ]; then + _findYQ "${JF_PRODUCT_HOME}" + fi + + if [ -z "${YQ_PATH}" ] && [ ! -z "${COMPOSE_HOME}" ] && [ -d "${COMPOSE_HOME}" ]; then + _findYQ "${COMPOSE_HOME}" + fi + # TODO We can remove this block after all the code is restructured. + if [ -z "${YQ_PATH}" ] && [ ! -z "${SCRIPT_HOME}" ] && [ -d "${SCRIPT_HOME}" ]; then + _findYQ "${SCRIPT_HOME}" + fi + +} + +io_getLinuxDistribution() { + LINUX_DISTRIBUTION= + + # Make sure running on Linux + [ $(uname -s) != "Linux" ] && return + + # Find out what Linux distribution we are on + + cat /etc/*-release | grep -i Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 6.x + cat /etc/issue.net | grep Red >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat || true + + # OS 7.x + cat /etc/*-release | grep -i centos >/dev/null 2>&1 && LINUX_DISTRIBUTION=CentOS && LINUX_DISTRIBUTION_VER="7" || true + + # OS 8.x + grep -q -i "release 8" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="8" || true + + # OS 7.x + grep -q -i "release 7" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="7" || true + + # OS 6.x + grep -q -i "release 6" /etc/redhat-release >/dev/null 2>&1 && LINUX_DISTRIBUTION_VER="6" || true + + cat /etc/*-release | grep -i Red | grep -i 'VERSION=7' >/dev/null 2>&1 && LINUX_DISTRIBUTION=RedHat && LINUX_DISTRIBUTION_VER="7" || true + + cat /etc/*-release | grep -i debian >/dev/null 2>&1 && LINUX_DISTRIBUTION=Debian || true + + cat /etc/*-release | grep -i ubuntu >/dev/null 2>&1 && LINUX_DISTRIBUTION=Ubuntu || true +} + +## Utility method to check ownership of folders/files +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If file is not owned by the user & group +## Parameters: + ## user + ## group + ## folder to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac +io_checkOwner () { + logSilly "Method ${FUNCNAME[0]}" + local osType=$(uname) + + if [ "${osType}" != "Linux" ]; then + logDebug "Unsupported OS. Skipping check" + return 0 + fi + + local file_to_check=$1 + local user_id_to_check=$2 + + + if [ -z "$user_id_to_check" ] || [ -z "$file_to_check" ]; then + errorExit "Invalid invocation of method. 
Missing mandatory inputs" + fi + + local group_id_to_check=${3:-$user_id_to_check} + local check_user_name=${4:-"no"} + + logDebug "Checking permissions on [$file_to_check] for user [$user_id_to_check] & group [$group_id_to_check]" + + local stat= + + if [ "${check_user_name}" == "yes" ]; then + stat=( $(stat -Lc "%U %G" ${file_to_check}) ) + else + stat=( $(stat -Lc "%u %g" ${file_to_check}) ) + fi + + local user_id=${stat[0]} + local group_id=${stat[1]} + + if [[ "${user_id}" != "${user_id_to_check}" ]] || [[ "${group_id}" != "${group_id_to_check}" ]] ; then + logDebug "Ownership mismatch. [${file_to_check}] is not owned by [${user_id_to_check}:${group_id_to_check}]" + return 1 + else + return 0 + fi +} + +## Utility method to change ownership of a file/folder - NON recursive +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnershipNonRecursive() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown ${user}:${group} ${targetFile}]" + chown ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to change ownership of a file. +## IMPORTANT +## If being called on a folder, should ONLY be called for fresh folders or may cause performance issues +## Failure conditions: + ## If invoked with incorrect inputs - FATAL + ## If chown operation fails - returns 1 +## Parameters: + ## user + ## group + ## file to chown +## Globals: none +## Returns: none +## NOTE: The method does NOTHING if the OS is Mac + +io_setOwnership() { + + local osType=$(uname) + if [ "${osType}" != "Linux" ]; then + return + fi + + local targetFile=$1 + local user=$2 + + if [ -z "$user" ] || [ -z "$targetFile" ]; then + errorExit "Invalid invocation of method. Missing mandatory inputs" + fi + + local group=${3:-$user} + logDebug "Method ${FUNCNAME[0]}. Executing [chown -R ${user}:${group} ${targetFile}]" + chown -R ${user}:${group} ${targetFile} || return 1 +} + +## Utility method to create third party folder structure necessary for Postgres +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## POSTGRESQL_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createPostgresDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${POSTGRESQL_DATA_ROOT}" ] && return 0 + + logDebug "Property [${POSTGRESQL_DATA_ROOT}] exists. Proceeding" + + createDir "${POSTGRESQL_DATA_ROOT}/data" + io_setOwnership "${POSTGRESQL_DATA_ROOT}" "${POSTGRES_USER}" "${POSTGRES_USER}" || errorExit "Setting ownership of [${POSTGRESQL_DATA_ROOT}] to [${POSTGRES_USER}:${POSTGRES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Nginx +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## NGINX_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createNginxDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${NGINX_DATA_ROOT}" ] && return 0 + + logDebug "Property [${NGINX_DATA_ROOT}] exists. 
Proceeding" + + createDir "${NGINX_DATA_ROOT}" + io_setOwnership "${NGINX_DATA_ROOT}" "${NGINX_USER}" "${NGINX_GROUP}" || errorExit "Setting ownership of [${NGINX_DATA_ROOT}] to [${NGINX_USER}:${NGINX_GROUP}] failed" +} + +## Utility method to create third party folder structure necessary for ElasticSearch +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## ELASTIC_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createElasticSearchDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${ELASTIC_DATA_ROOT}" ] && return 0 + + logDebug "Property [${ELASTIC_DATA_ROOT}] exists. Proceeding" + + createDir "${ELASTIC_DATA_ROOT}/data" + io_setOwnership "${ELASTIC_DATA_ROOT}" "${ES_USER}" "${ES_USER}" || errorExit "Setting ownership of [${ELASTIC_DATA_ROOT}] to [${ES_USER}:${ES_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Redis +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## REDIS_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRedisDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${REDIS_DATA_ROOT}" ] && return 0 + + logDebug "Property [${REDIS_DATA_ROOT}] exists. Proceeding" + + createDir "${REDIS_DATA_ROOT}" + io_setOwnership "${REDIS_DATA_ROOT}" "${REDIS_USER}" "${REDIS_USER}" || errorExit "Setting ownership of [${REDIS_DATA_ROOT}] to [${REDIS_USER}:${REDIS_USER}] failed" +} + +## Utility method to create third party folder structure necessary for Mongo +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## MONGODB_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createMongoDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${MONGODB_DATA_ROOT}" ] && return 0 + + logDebug "Property [${MONGODB_DATA_ROOT}] exists. Proceeding" + + createDir "${MONGODB_DATA_ROOT}/logs" + createDir "${MONGODB_DATA_ROOT}/configdb" + createDir "${MONGODB_DATA_ROOT}/db" + io_setOwnership "${MONGODB_DATA_ROOT}" "${MONGO_USER}" "${MONGO_USER}" || errorExit "Setting ownership of [${MONGODB_DATA_ROOT}] to [${MONGO_USER}:${MONGO_USER}] failed" +} + +## Utility method to create third party folder structure necessary for RabbitMQ +## Failure conditions: +## If creation of directory or assigning permissions fails +## Parameters: none +## Globals: +## RABBITMQ_DATA_ROOT +## Returns: none +## NOTE: The method does NOTHING if the folder already exists +io_createRabbitMQDir() { + logDebug "Method ${FUNCNAME[0]}" + [ -z "${RABBITMQ_DATA_ROOT}" ] && return 0 + + logDebug "Property [${RABBITMQ_DATA_ROOT}] exists. 
Proceeding" + + createDir "${RABBITMQ_DATA_ROOT}" + io_setOwnership "${RABBITMQ_DATA_ROOT}" "${RABBITMQ_USER}" "${RABBITMQ_USER}" || errorExit "Setting ownership of [${RABBITMQ_DATA_ROOT}] to [${RABBITMQ_USER}:${RABBITMQ_USER}] failed" +} + +# Add or replace a property in provided properties file +addOrReplaceProperty() { + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + local delimiter=${4:-"="} + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}\s*${delimiter}.*$" ${propertiesPath} > /dev/null 2>&1 + [ $? -ne 0 ] && echo -e "\n${propertyName}${delimiter}${propertyValue}" >> ${propertiesPath} + sed -i -e "s|^${propertyName}\s*${delimiter}.*$|${propertyName}${delimiter}${propertyValue}|g;" ${propertiesPath} +} + +# Set property only if its not set +io_setPropertyNoOverride(){ + local propertyName=$1 + local propertyValue=$2 + local propertiesPath=$3 + + # Return if any of the inputs are empty + [[ -z "$propertyName" || "$propertyName" == "" ]] && return + [[ -z "$propertyValue" || "$propertyValue" == "" ]] && return + [[ -z "$propertiesPath" || "$propertiesPath" == "" ]] && return + + grep "^${propertyName}:" ${propertiesPath} > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${propertyName}: ${propertyValue}" >> ${propertiesPath} || warn "Setting property ${propertyName}: ${propertyValue} in [ ${propertiesPath} ] failed" + else + logger "Skipping update of property : ${propertyName}" >&6 + fi +} + +# Add a line to a file if it doesn't already exist +addLine() { + local line_to_add=$1 + local target_file=$2 + logger "Trying to add line $1 to $2" >&6 2>&1 + cat "$target_file" | grep -F "$line_to_add" -wq >&6 2>&1 + if [ $? != 0 ]; then + logger "Line does not exist and will be added" >&6 2>&1 + echo $line_to_add >> $target_file || errorExit "Could not update $target_file" + fi +} + +# Utility method to check if a value (first paramter) exists in an array (2nd parameter) +# 1st parameter "value to find" +# 2nd parameter "The array to search in. Please pass a string with each value separated by space" +# Example: containsElement "y" "y Y n N" +containsElement () { + local searchElement=$1 + local searchArray=($2) + local found=1 + for elementInIndex in "${searchArray[@]}";do + if [[ $elementInIndex == $searchElement ]]; then + found=0 + fi + done + return $found +} + +# Utility method to get user's choice +# 1st parameter "what to ask the user" +# 2nd parameter "what choices to accept, separated by spaces" +# 3rd parameter "what is the default choice (to use if the user simply presses Enter)" +# Example 'getUserChoice "Are you feeling lucky? Punk!" "y n Y N" "y"' +getUserChoice(){ + configureLogOutput + read_timeout=${read_timeout:-0.5} + local choice="na" + local text_to_display=$1 + local choices=$2 + local default_choice=$3 + users_choice= + + until containsElement "$choice" "$choices"; do + echo "";echo ""; + sleep $read_timeout #This ensures correct placement of the question. 
+ read -p "$text_to_display :" choice + : ${choice:=$default_choice} + done + users_choice=$choice + echo -e "\n$text_to_display: $users_choice" >&6 + sleep $read_timeout #This ensures correct logging +} + +setFilePermission () { + local permission=$1 + local file=$2 + chmod "${permission}" "${file}" || warn "Setting permission ${permission} to file [ ${file} ] failed" +} + + +#setting required paths +setAppDir (){ + SCRIPT_DIR=$(dirname $0) + SCRIPT_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + APP_DIR="`cd "${SCRIPT_HOME}";pwd`" +} + +ZIP_TYPE="zip" +COMPOSE_TYPE="compose" +HELM_TYPE="helm" +RPM_TYPE="rpm" +DEB_TYPE="debian" + +sourceScript () { + local file="$1" + + [ ! -z "${file}" ] || errorExit "target file is not passed to source a file" + + if [ ! -f "${file}" ]; then + errorExit "${file} file is not found" + else + source "${file}" || errorExit "Unable to source ${file}, please check if the user ${USER} has permissions to perform this action" + fi +} +# Source required helpers +initHelpers () { + local systemYamlHelper="${APP_DIR}/systemYamlHelper.sh" + local thirdPartyDir=$(find ${APP_DIR}/.. -name third-party -type d) + export YQ_PATH="${thirdPartyDir}/yq" + LIBXML2_PATH="${thirdPartyDir}/libxml2/bin/xmllint" + export LD_LIBRARY_PATH="${thirdPartyDir}/libxml2/lib" + sourceScript "${systemYamlHelper}" +} +# Check migration info yaml file available in the path +checkMigrationInfoYaml () { + + if [[ -f "${APP_DIR}/migrationHelmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationHelmInfo.yaml" + INSTALLER="${HELM_TYPE}" + elif [[ -f "${APP_DIR}/migrationZipInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationZipInfo.yaml" + INSTALLER="${ZIP_TYPE}" + elif [[ -f "${APP_DIR}/migrationRpmInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationRpmInfo.yaml" + INSTALLER="${RPM_TYPE}" + elif [[ -f "${APP_DIR}/migrationDebInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationDebInfo.yaml" + INSTALLER="${DEB_TYPE}" + elif [[ -f "${APP_DIR}/migrationComposeInfo.yaml" ]]; then + MIGRATION_SYSTEM_YAML_INFO="${APP_DIR}/migrationComposeInfo.yaml" + INSTALLER="${COMPOSE_TYPE}" + else + errorExit "File migration Info yaml does not exist in [${APP_DIR}]" + fi +} + +retrieveYamlValue () { + local yamlPath="$1" + local value="$2" + local output="$3" + local message="$4" + + [[ -z "${yamlPath}" ]] && errorExit "yamlPath is mandatory to get value from ${MIGRATION_SYSTEM_YAML_INFO}" + + getYamlValue "${yamlPath}" "${MIGRATION_SYSTEM_YAML_INFO}" "false" + value="${YAML_VALUE}" + if [[ -z "${value}" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "Empty value for ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + elif [[ "${output}" == "Skip" ]]; then + return + else + errorExit "${message}" + fi + fi +} + +checkEnv () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + # check Environment JF_PRODUCT_HOME is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_PRODUCT_HOME")" + if [[ -z "${NEW_DATA_DIR}" ]]; then + errorExit "Environment variable JF_PRODUCT_HOME is not set, this is required to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + getCustomDataDir_hook + NEW_DATA_DIR="${OLD_DATA_DIR}" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + else + # check Environment JF_ROOT_DATA_DIR is 
set before migration + OLD_DATA_DIR="$(evalVariable "OLD_DATA_DIR" "JF_ROOT_DATA_DIR")" + # check Environment JF_ROOT_DATA_DIR is set before migration + NEW_DATA_DIR="$(evalVariable "NEW_DATA_DIR" "JF_ROOT_DATA_DIR")" + if [[ -z "${NEW_DATA_DIR}" ]] && [[ -z "${OLD_DATA_DIR}" ]]; then + errorExit "Could not find ${PROMPT_DATA_DIR_LOCATION} to perform Migration" + fi + # appending var directory to $JF_PRODUCT_HOME + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi + +} + +getDataDir () { + + if [[ "${INSTALLER}" == "${ZIP_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}"|| "${INSTALLER}" == "${HELM_TYPE}" ]]; then + checkEnv + else + getCustomDataDir_hook + NEW_DATA_DIR="`cd "${APP_DIR}"/../../;pwd`" + NEW_DATA_DIR="${NEW_DATA_DIR}/var" + fi +} + +# Retrieve Product name from MIGRATION_SYSTEM_YAML_INFO +getProduct () { + retrieveYamlValue "migration.product" "${YAML_VALUE}" "Fail" "Empty value under ${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + PRODUCT="${YAML_VALUE}" + PRODUCT=$(echo "${PRODUCT}" | tr '[:upper:]' '[:lower:]' 2>/dev/null) + if [[ "${PRODUCT}" != "artifactory" && "${PRODUCT}" != "distribution" && "${PRODUCT}" != "xray" ]]; then + errorExit "migration.product in [${MIGRATION_SYSTEM_YAML_INFO}] is not correct, please set based on product as ARTIFACTORY or DISTRIBUTION" + fi + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + JF_USER="${PRODUCT}" + fi +} +# Compare product version with minProductVersion and maxProductVersion +migrateCheckVersion () { + local productVersion="$1" + local minProductVersion="$2" + local maxProductVersion="$3" + local productVersion618="6.18.0" + local unSupportedProductVersions7=("7.2.0 7.2.1") + + if [[ "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${maxProductVersion}")" -eq 1 ]]; then + logger "Migration not necessary. ${PRODUCT} is already ${productVersion}" + exit 11 + elif [[ "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${minProductVersion}")" -eq 1 ]]; then + if [[ ("$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 0 || "$(io_compareVersions "${productVersion}" "${productVersion618}")" -eq 1) && " ${unSupportedProductVersions7[@]} " =~ " ${CURRENT_VERSION} " ]]; then + touch /tmp/error; + errorExit "Current ${PRODUCT} version (${productVersion}) does not support migration to ${CURRENT_VERSION}" + else + bannerStart "Detected ${PRODUCT} ${productVersion}, initiating migration" + fi + else + logger "Current ${PRODUCT} ${productVersion} version is not supported for migration" + exit 1 + fi +} + +getProductVersion () { + local minProductVersion="$1" + local maxProductVersion="$2" + local newfilePath="$3" + local oldfilePath="$4" + local propertyInDocker="$5" + local property="$6" + local productVersion= + local status= + + if [[ "$INSTALLER" == "${COMPOSE_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + elif [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${propertyInDocker}" "${newfilePath}")" + status="fail" + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." 
+ exit 0 + fi + elif [[ "$INSTALLER" == "${HELM_TYPE}" ]]; then + if [[ -f "${oldfilePath}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + else + productVersion="$(cat "${oldfilePath}")" + fi + status="success" + else + productVersion="${CURRENT_VERSION}" + [[ -z "${productVersion}" || "${productVersion}" == "" ]] && logger "${PRODUCT} CURRENT_VERSION is not set" && exit 0 + fi + else + if [[ -f "${newfilePath}" ]]; then + productVersion="$(readKey "${property}" "${newfilePath}")" + status="fail" + elif [[ -f "${oldfilePath}" ]]; then + productVersion="$(readKey "${property}" "${oldfilePath}")" + status="success" + else + if [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + logger "File [${newfilePath}] not found to get current version." + else + logger "File [${oldfilePath}] or [${newfilePath}] not found to get current version." + fi + exit 0 + fi + fi + if [[ -z "${productVersion}" || "${productVersion}" == "" ]]; then + [[ "${status}" == "success" ]] && logger "No version found in file [${oldfilePath}]." + [[ "${status}" == "fail" ]] && logger "No version found in file [${newfilePath}]." + exit 0 + fi + + migrateCheckVersion "${productVersion}" "${minProductVersion}" "${maxProductVersion}" +} + +readKey () { + local property="$1" + local file="$2" + local version= + + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! -z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + version="${value}" && check=true && break + else + check=false + fi + done < "${file}" + if [[ "${check}" == "false" ]]; then + return + fi + echo "${version}" +} + +# create Log directory +createLogDir () { + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" + fi +} + +# Creating migration log file +creationMigrateLog () { + local LOG_FILE_NAME="migration.log" + createLogDir + local MIGRATION_LOG_FILE="${NEW_DATA_DIR}/log/${LOG_FILE_NAME}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + MIGRATION_LOG_FILE="${SCRIPT_HOME}/${LOG_FILE_NAME}" + fi + touch "${MIGRATION_LOG_FILE}" + setFilePermission "${LOG_FILE_PERMISSION}" "${MIGRATION_LOG_FILE}" + exec &> >(tee -a "${MIGRATION_LOG_FILE}") +} +# Set path where system.yaml should create +setSystemYamlPath () { + SYSTEM_YAML_PATH="${NEW_DATA_DIR}/etc/system.yaml" + if [[ "${INSTALLER}" != "${HELM_TYPE}" ]]; then + logger "system.yaml will be created in path [${SYSTEM_YAML_PATH}]" + fi +} +# Create directory +createDirectory () { + local directory="$1" + local output="$2" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${directory}" + mkdir -p "${directory}" && check=true || check=false + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi + setOwnershipBasedOnInstaller "${directory}" +} + +setOwnershipBasedOnInstaller () { + local directory="$1" + if [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + chown -R ${USER_TO_CHECK}:${GROUP_TO_CHECK} "${directory}" || warn "Setting ownership on $directory failed" + elif [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == 
"${HELM_TYPE}" ]]; then + io_setOwnership "${directory}" "${JF_USER}" "${JF_USER}" + fi +} + +getUserAndGroup () { + local file="$1" + read uid gid <<<$(stat -c '%U %G' ${file}) + USER_TO_CHECK="${uid}" + GROUP_TO_CHECK="${gid}" +} + +# set ownership +getUserAndGroupFromFile () { + case $PRODUCT in + artifactory) + getUserAndGroup "/etc/opt/jfrog/artifactory/artifactory.properties" + ;; + distribution) + getUserAndGroup "${OLD_DATA_DIR}/etc/versions.properties" + ;; + xray) + getUserAndGroup "${OLD_DATA_DIR}/security/master.key" + ;; + esac +} + +# creating required directories +createRequiredDirs () { + bannerSubSection "CREATING REQUIRED DIRECTORIES" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${JF_USER}" "${JF_USER}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${JF_USER}" "${JF_USER}" "yes" + io_setOwnership "${NEW_DATA_DIR}" "${JF_USER}" "${JF_USER}" + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data/postgres" "${POSTGRES_USER}" "${POSTGRES_USER}" "yes" + fi + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + getUserAndGroupFromFile + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/etc/security" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/data" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/log/archived" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/work" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + removeSoftLinkAndCreateDir "${NEW_DATA_DIR}/backup" "${USER_TO_CHECK}" "${GROUP_TO_CHECK}" "yes" + fi +} + +# Check entry in map is format +checkMapEntry () { + local entry="$1" + + [[ "${entry}" != *"="* ]] && echo -n "false" || echo -n "true" +} +# Check value Empty and warn +warnIfEmpty () { + local filePath="$1" + local yamlPath="$2" + local check= + + if [[ -z "${filePath}" ]]; then + warn "Empty value in yamlpath [${yamlPath} in [${MIGRATION_SYSTEM_YAML_INFO}]" + check=false + else + check=true + fi + echo "${check}" +} + +logCopyStatus () { + local status="$1" + local logMessage="$2" + local warnMessage="$3" + + [[ "${status}" == "success" ]] && logger "${logMessage}" + [[ "${status}" == "fail" ]] && warn "${warnMessage}" +} +# copy contents from source to destination +copyCmd () { + local source="$1" + local target="$2" + local mode="$3" + local status= + + case $mode in + unique) + cp -up "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + specific) + cp -pf "${source}" "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied file [${source}] to [${target}]" "Failed to copy file [${source}] to [${target}]" + ;; + patternFiles) + cp -pf "${source}"* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied files matching 
[${source}*] to [${target}]" "Failed to copy files matching [${source}*] to [${target}]" + ;; + full) + cp -prf "${source}"/* "${target}"/ && status="success" || status="fail" + logCopyStatus "${status}" "Successfully copied directory contents from [${source}] to [${target}]" "Failed to copy directory contents from [${source}] to [${target}]" + ;; + esac +} +# Check contents exist in source before copying +copyOnContentExist () { + local source="$1" + local target="$2" + local mode="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + copyCmd "${source}" "${target}" "${mode}" + else + logger "No contents to copy from [${source}]" + fi +} + +# move source to destination +moveCmd () { + local source="$1" + local target="$2" + local status= + + mv -f "${source}" "${target}" && status="success" || status="fail" + [[ "${status}" == "success" ]] && logger "Successfully moved directory [${source}] to [${target}]" + [[ "${status}" == "fail" ]] && warn "Failed to move directory [${source}] to [${target}]" +} + +# symlink target to source +symlinkCmd () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + local check=false + + if [[ "${symlinkSubDir}" == "subDir" ]]; then + ln -sf "${source}"/* "${target}" && check=true || check=false + else + ln -sf "${source}" "${target}" && check=true || check=false + fi + + [[ "${check}" == "true" ]] && logger "Successfully symlinked directory [${target}] to old [${source}]" + [[ "${check}" == "false" ]] && warn "Symlink operation failed" +} +# Check contents exist in source before symlinking +symlinkOnExist () { + local source="$1" + local target="$2" + local symlinkSubDir="$3" + + if [[ "$(checkContentExists "${source}")" == "true" ]]; then + if [[ "${symlinkSubDir}" == "subDir" ]]; then + symlinkCmd "${source}" "${target}" "subDir" + else + symlinkCmd "${source}" "${target}" + fi + else + logger "No contents to symlink from [${source}]" + fi +} + +prependDir () { + local absolutePath="$1" + local fullPath="$2" + local sourcePath= + + if [[ "${absolutePath}" = \/* ]]; then + sourcePath="${absolutePath}" + else + sourcePath="${fullPath}" + fi + echo "${sourcePath}" +} + +getFirstEntry (){ + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $1}' +} + +getSecondEntry () { + local entry="$1" + + [[ -z "${entry}" ]] && return + echo "${entry}" | awk -F"=" '{print $2}' +} +# To get absolutePath +pathResolver () { + local directoryPath="$1" + local dataDir= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Warning" + dataDir="${YAML_VALUE}" + cd "${dataDir}" + else + cd "${OLD_DATA_DIR}" + fi + absoluteDir="`cd "${directoryPath}";pwd`" + echo "${absoluteDir}" +} + +checkPathResolver () { + local value="$1" + + if [[ "${value}" == \/* ]]; then + value="${value}" + else + value="$(pathResolver "${value}")" + fi + echo "${value}" +} + +propertyMigrate () { + local entry="$1" + local filePath="$2" + local fileName="$3" + local check=false + + local yamlPath="$(getFirstEntry "${entry}")" + local property="$(getSecondEntry "${entry}")" + if [[ -z "${property}" ]]; then + warn "Property is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${property}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + while IFS='=' read -r key value || [ -n "${key}" ]; + do + [[ ! "${key}" =~ \#.* && ! -z "${key}" && ! 
-z "${value}" ]] + key="$(io_trim "${key}")" + if [[ "${key}" == "${property}" ]]; then + if [[ "${PRODUCT}" == "artifactory" ]]; then + value="$(migrateResolveDerbyPath "${key}" "${value}")" + value="$(migrateResolveHaDirPath "${key}" "${value}")" + value="$(updatePostgresUrlString_Hook "${yamlPath}" "${value}")" + fi + if [[ "${key}" == "context.url" ]]; then + local ip=$(echo "${value}" | awk -F/ '{print $3}' | sed 's/:.*//') + setSystemValue "shared.node.ip" "${ip}" "${SYSTEM_YAML_PATH}" + logger "Setting [shared.node.ip] with [${ip}] in system.yaml" + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" && logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" && check=true && break || check=false + fi + done < "${NEW_DATA_DIR}/${filePath}/${fileName}" + [[ "${check}" == "false" ]] && logger "Property [${property}] not found in file [${fileName}]" +} + +setHaEnabled_hook () { + echo "" +} + +migratePropertiesFiles () { + local fileList= + local filePath= + local fileName= + local map= + + retrieveYamlValue "migration.propertyFiles.files" "fileList" "Skip" + fileList="${YAML_VALUE}" + if [[ -z "${fileList}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF PROPERTY FILES" + for file in ${fileList}; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.propertyFiles.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.propertyFiles.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + if [[ "$(checkFileExists "${NEW_DATA_DIR}/${filePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + # setting haEnabled with true only if ha-node.properties is present + setHaEnabled_hook "${filePath}" + retrieveYamlValue "migration.propertyFiles.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + propertyMigrate "${entry}" "${filePath}" "${fileName}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=property" + fi + done + else + logger "File [${fileName}] was not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} + +createTargetDir () { + local mountDir="$1" + local target="$2" + + logger "Target directory not found [${mountDir}/${target}], creating it" + createDirectoryRecursive "${mountDir}" "${target}" "Warning" +} + +createDirectoryRecursive () { + local mountDir="$1" + local target="$2" + local output="$3" + local check=false + local message="Could not create directory ${directory}, please check if the user ${USER} has permissions to perform this action" + removeSoftLink "${mountDir}/${target}" + local directory=$(echo "${target}" | tr '/' ' ' ) + local targetDir="${mountDir}" + for dir in ${directory}; + do + targetDir="${targetDir}/${dir}" + mkdir -p "${targetDir}" && check=true || check=false + setOwnershipBasedOnInstaller "${targetDir}" + done + if [[ "${check}" == "false" ]]; then + if [[ "${output}" == "Warning" ]]; then + warn "${message}" + else + errorExit "${message}" + fi + fi +} + +copyOperation () { + local source="$1" + local target="$2" + local mode="$3" + local check=false + local targetDataDir= + local targetLink= + local date= + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir 
"${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + #remove source if it is a symlink + if [[ -L "${source}" ]]; then + targetLink=$(readlink -f "${source}") + logger "Removing the symlink [${source}] pointing to [${targetLink}]" + rm -f "${source}" + source=${targetLink} + fi + if [[ "$(checkDirExists "${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path" + return + fi + if [[ "$(checkDirContents "${source}")" != "true" ]]; then + logger "No contents to copy from [${source}]" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyOnContentExist "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copySpecificFiles () { + local source="$1" + local target="$2" + local mode="$3" + + # prepend OLD_DATA_DIR only if source is relative path + source="$(prependDir "${source}" "${OLD_DATA_DIR}/${source}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkFileExists "${source}")" != "true" ]]; then + logger "Source file [${source}] does not exist in path" + return + fi + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${source}" "${targetDataDir}/${target}" "${mode}" +} + +copyPatternMatchingFiles () { + local source="$1" + local target="$2" + local mode="$3" + local sourcePath="${4}" + + # prepend OLD_DATA_DIR only if source is relative path + sourcePath="$(prependDir "${sourcePath}" "${OLD_DATA_DIR}/${sourcePath}")" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + copyLogMessage "${mode}" + if [[ "$(checkDirExists "${sourcePath}")" != "true" ]]; then + logger "Source [${sourcePath}] directory not found in path" + return + fi + if ls "${sourcePath}/${source}"* 1> /dev/null 2>&1; then + if [[ "$(checkDirExists "${targetDataDir}/${target}")" != "true" ]]; then + createTargetDir "${targetDataDir}" "${target}" + fi + copyCmd "${sourcePath}/${source}" "${targetDataDir}/${target}" "${mode}" + else + logger "Source file [${sourcePath}/${source}*] does not exist in path" + fi +} + +copyLogMessage () { + local mode="$1" + case $mode in + specific) + logger "Copy file [${source}] to target [${targetDataDir}/${target}]" + ;; + patternFiles) + logger "Copy files matching [${sourcePath}/${source}*] to target [${targetDataDir}/${target}]" + ;; + full) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + unique) + logger "Copy directory contents from source [${source}] to target [${targetDataDir}/${target}]" + ;; + esac +} + +copyBannerMessages () { + local mode="$1" + local textMode="$2" + case $mode in + specific) + bannerSection "COPY ${textMode} FILES" + ;; + patternFiles) + bannerSection "COPY MATCHING ${textMode}" + ;; + full) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + unique) + bannerSection "COPY ${textMode} DIRECTORIES CONTENTS" + ;; + esac +} + +invokeCopyFunctions () { + local mode="$1" + local source="$2" + local target="$3" + + case $mode in + specific) + copySpecificFiles "${source}" "${target}" "${mode}" + ;; + 
patternFiles) + retrieveYamlValue "migration.${copyFormat}.sourcePath" "map" "Warning" + local sourcePath="${YAML_VALUE}" + copyPatternMatchingFiles "${source}" "${target}" "${mode}" "${sourcePath}" + ;; + full) + copyOperation "${source}" "${target}" "${mode}" + ;; + unique) + copyOperation "${source}" "${target}" "${mode}" + ;; + esac +} +# Copies contents from source directory and target directory +copyDataDirectories () { + local copyFormat="$1" + local mode="$2" + local map= + local source= + local target= + local textMode= + local targetDataDir= + local copyFormatValue= + + retrieveYamlValue "migration.${copyFormat}" "${copyFormat}" "Skip" + copyFormatValue="${YAML_VALUE}" + if [[ -z "${copyFormatValue}" ]]; then + return + fi + textMode=$(echo "${mode}" | tr '[:lower:]' '[:upper:]' 2>/dev/null) + copyBannerMessages "${mode}" "${textMode}" + retrieveYamlValue "migration.${copyFormat}.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + targetDataDir="${NEW_DATA_DIR}" + else + targetDataDir="`cd "${NEW_DATA_DIR}"/../;pwd`" + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeCopyFunctions "${mode}" "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +invokeMoveFunctions () { + local source="$1" + local target="$2" + local sourceDataDir= + local targetBasename= + # prepend OLD_DATA_DIR only if source is relative path + sourceDataDir=$(prependDir "${source}" "${OLD_DATA_DIR}/${source}") + targetBasename=$(dirname "${target}") + logger "Moving directory source [${sourceDataDir}] to target [${NEW_DATA_DIR}/${target}]" + if [[ "$(checkDirExists "${sourceDataDir}")" != "true" ]]; then + logger "Directory [${sourceDataDir}] not found in path to move" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${targetBasename}")" != "true" ]]; then + createTargetDir "${NEW_DATA_DIR}" "${targetBasename}" + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/${target}" + else + moveCmd "${sourceDataDir}" "${NEW_DATA_DIR}/tempDir" + moveCmd "${NEW_DATA_DIR}/tempDir" "${NEW_DATA_DIR}/${target}" + fi +} + +# Move source directory and target directory +moveDirectories () { + local moveDataDirectories= + local map= + local source= + local target= + + retrieveYamlValue "migration.moveDirectories" "moveDirectories" "Skip" + moveDirectories="${YAML_VALUE}" + if [[ -z "${moveDirectories}" ]]; then + return + fi + bannerSection "MOVE DIRECTORIES" + retrieveYamlValue "migration.moveDirectories.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + invokeMoveFunctions "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e 
target=source" + fi + echo ""; + done +} + +# Trim masterKey if its generated using hex 32 +trimMasterKey () { + local masterKeyDir=/opt/jfrog/artifactory/var/etc/security + local oldMasterKey=$(<${masterKeyDir}/master.key) + local oldMasterKey_Length=$(echo ${#oldMasterKey}) + local newMasterKey= + if [[ ${oldMasterKey_Length} -gt 32 ]]; then + bannerSection "TRIM MASTERKEY" + newMasterKey=$(echo ${oldMasterKey:0:32}) + cp ${masterKeyDir}/master.key ${masterKeyDir}/backup_master.key + logger "Original masterKey is backed up : ${masterKeyDir}/backup_master.key" + rm -rf ${masterKeyDir}/master.key + echo ${newMasterKey} > ${masterKeyDir}/master.key + logger "masterKey is trimmed : ${masterKeyDir}/master.key" + fi +} + +copyDirectories () { + + copyDataDirectories "copyFiles" "full" + copyDataDirectories "copyUniqueFiles" "unique" + copyDataDirectories "copySpecificFiles" "specific" + copyDataDirectories "copyPatternMatchingFiles" "patternFiles" +} + +symlinkDir () { + local source="$1" + local target="$2" + local targetDir= + local basename= + local targetParentDir= + + targetDir="$(dirname "${target}")" + if [[ "${targetDir}" == "${source}" ]]; then + # symlink the sub directories + createDirectory "${NEW_DATA_DIR}/${target}" "Warning" + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" "subDir" + basename="$(basename "${target}")" + cd "${NEW_DATA_DIR}/${target}" && rm -f "${basename}" + fi + else + targetParentDir="$(dirname "${NEW_DATA_DIR}/${target}")" + createDirectory "${targetParentDir}" "Warning" + if [[ "$(checkDirExists "${targetParentDir}")" == "true" ]]; then + symlinkOnExist "${OLD_DATA_DIR}/${source}" "${NEW_DATA_DIR}/${target}" + fi + fi +} + +symlinkOperation () { + local source="$1" + local target="$2" + local check=false + local targetLink= + local date= + + # Check if source is a link and do symlink + if [[ -L "${OLD_DATA_DIR}/${source}" ]]; then + targetLink=$(readlink -f "${OLD_DATA_DIR}/${source}") + symlinkOnExist "${targetLink}" "${NEW_DATA_DIR}/${target}" + else + # check if source is directory and do symlink + if [[ "$(checkDirExists "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "Source [${source}] directory not found in path to symlink" + return + fi + if [[ "$(checkDirContents "${OLD_DATA_DIR}/${source}")" != "true" ]]; then + logger "No contents found in [${OLD_DATA_DIR}/${source}] to symlink" + return + fi + if [[ "$(checkDirExists "${NEW_DATA_DIR}/${target}")" != "true" ]]; then + logger "Target directory [${NEW_DATA_DIR}/${target}] does not exist to create symlink, creating it" + symlinkDir "${source}" "${target}" + else + rm -rf "${NEW_DATA_DIR}/${target}" && check=true || check=false + [[ "${check}" == "false" ]] && warn "Failed to remove contents in [${NEW_DATA_DIR}/${target}/]" + symlinkDir "${source}" "${target}" + fi + fi +} +# Creates a symlink path - Source directory to which the symbolic link should point. 
+symlinkDirectories () { + local linkFiles= + local map= + local source= + local target= + + retrieveYamlValue "migration.linkFiles" "linkFiles" "Skip" + linkFiles="${YAML_VALUE}" + if [[ -z "${linkFiles}" ]]; then + return + fi + bannerSection "SYMLINK DIRECTORIES" + retrieveYamlValue "migration.linkFiles.map" "map" "Warning" + map="${YAML_VALUE}" + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + source="$(getSecondEntry "${entry}")" + target="$(getFirstEntry "${entry}")" + logger "Symlink directory [${NEW_DATA_DIR}/${target}] to old [${OLD_DATA_DIR}/${source}]" + [[ -z "${source}" ]] && warn "source value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${target}" ]] && warn "target value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + symlinkOperation "${source}" "${target}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e target=source" + fi + echo ""; + done +} + +updateConnectionString () { + local yamlPath="$1" + local value="$2" + local mongoPath="shared.mongo.url" + local rabbitmqPath="shared.rabbitMq.url" + local postgresPath="shared.database.url" + local redisPath="shared.redis.connectionString" + local mongoConnectionString="mongo.connectionString" + local sourceKey= + local hostIp=$(io_getPublicHostIP) + local hostKey= + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + # Replace @postgres:,@mongodb:,@rabbitmq:,@redis: to @{hostIp}: (Compose Installer) + hostKey="@${hostIp}:" + case $yamlPath in + ${postgresPath}) + sourceKey="@postgres:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoPath}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${rabbitmqPath}) + sourceKey="@rabbitmq:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${redisPath}) + sourceKey="@redis:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + ${mongoConnectionString}) + sourceKey="@mongodb:" + value=$(io_replaceString "${value}" "${sourceKey}" "${hostKey}") + ;; + esac + fi + echo -n "${value}" +} + +yamlMigrate () { + local entry="$1" + local sourceFile="$2" + local value= + local yamlPath= + local key= + yamlPath="$(getFirstEntry "${entry}")" + key="$(getSecondEntry "${entry}")" + if [[ -z "${key}" ]]; then + warn "key is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + getYamlValue "${key}" "${sourceFile}" "false" + value="${YAML_VALUE}" + if [[ ! 
-z "${value}" ]]; then + value=$(updateConnectionString "${yamlPath}" "${value}") + fi + if [[ "${PRODUCT}" == "artifactory" ]]; then + replicatorProfiling + fi + if [[ -z "${value}" ]]; then + logger "No value for [${key}] in [${sourceFile}]" + else + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the key [${key}] in system.yaml" + fi +} + +migrateYamlFile () { + local files= + local filePath= + local fileName= + local sourceFile= + local map= + retrieveYamlValue "migration.yaml.files" "files" "Skip" + files="${YAML_VALUE}" + if [[ -z "${files}" ]]; then + return + fi + bannerSection "MIGRATION OF YAML FILES" + for file in $files; + do + bannerSubSection "Processing Migration of $file" + retrieveYamlValue "migration.yaml.$file.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + retrieveYamlValue "migration.yaml.$file.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + [[ -z "${filePath}" && -z "${fileName}" ]] && continue + sourceFile="${NEW_DATA_DIR}/${filePath}/${fileName}" + if [[ "$(checkFileExists "${sourceFile}")" == "true" ]]; then + logger "File [${fileName}] found in path [${NEW_DATA_DIR}/${filePath}]" + retrieveYamlValue "migration.yaml.$file.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + yamlMigrate "${entry}" "${sourceFile}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done + else + logger "File [${fileName}] is not found in path [${NEW_DATA_DIR}/${filePath}] to migrate" + fi + done +} +# updates the key and value in system.yaml +updateYamlKeyValue () { + local entry="$1" + local value= + local yamlPath= + local key= + + yamlPath="$(getFirstEntry "${entry}")" + value="$(getSecondEntry "${entry}")" + if [[ -z "${value}" ]]; then + warn "value is empty in map [${entry}] in the file [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + if [[ -z "${yamlPath}" ]]; then + warn "yamlPath is empty for [${key}] in [${MIGRATION_SYSTEM_YAML_INFO}]" + return + fi + setSystemValue "${yamlPath}" "${value}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value [${value}] in system.yaml" +} + +updateSystemYamlFile () { + local updateYaml= + local map= + + retrieveYamlValue "migration.updateSystemYaml" "updateYaml" "Skip" + updateSystemYaml="${YAML_VALUE}" + if [[ -z "${updateSystemYaml}" ]]; then + return + fi + bannerSection "UPDATE SYSTEM YAML FILE WITH KEY AND VALUES" + retrieveYamlValue "migration.updateSystemYaml.map" "map" "Warning" + map="${YAML_VALUE}" + if [[ -z "${map}" ]]; then + return + fi + for entry in $map; + do + if [[ "$(checkMapEntry "${entry}")" == "true" ]]; then + updateYamlKeyValue "${entry}" + else + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e yamlPath=key" + fi + done +} + +backupFiles_hook () { + logSilly "Method ${FUNCNAME[0]}" +} + +backupDirectory () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + 
effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyOnContentExist "${targetDir}" "${backupDirectory}/${dir}" "full" + fi +} + +removeOldDirectory () { + local backupDir="$1" + local entry="$2" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${entry}" "${OLD_DATA_DIR}/${entry}")" + local outputCheckDirExists="$(checkDirExists "${targetDir}")" + if [[ "${outputCheckDirExists}" != "true" ]]; then + logger "No [${targetDir}] directory found to delete" + echo ""; + return + fi + backupDirectory "${backupDir}" "${entry}" "${targetDir}" + rm -rf "${targetDir}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed directory [${targetDir}]" + [[ "${check}" == "false" ]] && warn "Failed to remove directory [${targetDir}]" + echo ""; +} + +cleanUpOldDataDirectories () { + local cleanUpOldDataDir= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldDataDir" "cleanUpOldDataDir" "Skip" + cleanUpOldDataDir="${YAML_VALUE}" + if [[ -z "${cleanUpOldDataDir}" ]]; then + return + fi + bannerSection "CLEAN UP OLD DATA DIRECTORIES" + retrieveYamlValue "migration.cleanUpOldDataDir.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old data configurations are backedup in [${backupDir}] directory ******" + backupFiles_hook "${backupDir}/${PRODUCT}" + for entry in $map; + do + removeOldDirectory "${backupDir}" "${entry}" + done +} + +backupFiles () { + local backupDir="$1" + local dir="$2" + local targetDir="$3" + local fileName="$4" + local effectiveUser= + local effectiveGroup= + + if [[ "${dir}" = \/* ]]; then + dir=$(echo "${dir/\//}") + fi + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" ]]; then + effectiveUser="${JF_USER}" + effectiveGroup="${JF_USER}" + elif [[ "${INSTALLER}" == "${DEB_TYPE}" || "${INSTALLER}" == "${RPM_TYPE}" ]]; then + effectiveUser="${USER_TO_CHECK}" + effectiveGroup="${GROUP_TO_CHECK}" + fi + + removeSoftLinkAndCreateDir "${backupDir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local backupDirectory="${backupDir}/${PRODUCT}" + removeSoftLinkAndCreateDir "${backupDirectory}" "${effectiveUser}" "${effectiveGroup}" "yes" + removeSoftLinkAndCreateDir "${backupDirectory}/${dir}" "${effectiveUser}" "${effectiveGroup}" "yes" + local outputCheckDirExists="$(checkDirExists "${backupDirectory}/${dir}")" + if [[ "${outputCheckDirExists}" == "true" ]]; then + copyCmd "${targetDir}/${fileName}" "${backupDirectory}/${dir}" "specific" + fi +} + +removeOldFiles () { + local backupDir="$1" + local directoryName="$2" + local fileName="$3" + local check=false + + # prepend OLD_DATA_DIR only if entry is relative path + local targetDir="$(prependDir "${directoryName}" "${OLD_DATA_DIR}/${directoryName}")" + local outputCheckFileExists="$(checkFileExists "${targetDir}/${fileName}")" + if [[ "${outputCheckFileExists}" != "true" ]]; then + logger "No [${targetDir}/${fileName}] 
file found to delete" + return + fi + backupFiles "${backupDir}" "${directoryName}" "${targetDir}" "${fileName}" + rm -f "${targetDir}/${fileName}" && check=true || check=false + [[ "${check}" == "true" ]] && logger "Successfully removed file [${targetDir}/${fileName}]" + [[ "${check}" == "false" ]] && warn "Failed to remove file [${targetDir}/${fileName}]" + echo ""; +} + +cleanUpOldFiles () { + local cleanUpFiles= + local map= + local entry= + + retrieveYamlValue "migration.cleanUpOldFiles" "cleanUpOldFiles" "Skip" + cleanUpOldFiles="${YAML_VALUE}" + if [[ -z "${cleanUpOldFiles}" ]]; then + return + fi + bannerSection "CLEAN UP OLD FILES" + retrieveYamlValue "migration.cleanUpOldFiles.map" "map" "Warning" + map="${YAML_VALUE}" + [[ -z "${map}" ]] && continue + date="$(date +%Y%m%d%H%M)" + backupDir="${NEW_DATA_DIR}/backup/backup-${date}" + bannerImportant "****** Old files are backedup in [${backupDir}] directory ******" + for entry in $map; + do + local outputCheckMapEntry="$(checkMapEntry "${entry}")" + if [[ "${outputCheckMapEntry}" != "true" ]]; then + warn "map entry [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}] is not in correct format, correct format i.e directoryName=fileName" + fi + local fileName="$(getSecondEntry "${entry}")" + local directoryName="$(getFirstEntry "${entry}")" + [[ -z "${fileName}" ]] && warn "File name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + [[ -z "${directoryName}" ]] && warn "Directory name value is empty for [${entry}] in [${MIGRATION_SYSTEM_YAML_INFO}]" && continue + removeOldFiles "${backupDir}" "${directoryName}" "${fileName}" + echo ""; + done +} + +startMigration () { + bannerSection "STARTING MIGRATION" +} + +endMigration () { + bannerSection "MIGRATION COMPLETED SUCCESSFULLY" +} + +initialize () { + setAppDir + _pauseExecution "setAppDir" + initHelpers + _pauseExecution "initHelpers" + checkMigrationInfoYaml + _pauseExecution "checkMigrationInfoYaml" + getProduct + _pauseExecution "getProduct" + getDataDir + _pauseExecution "getDataDir" +} + +main () { + case $PRODUCT in + artifactory) + migrateArtifactory + ;; + distribution) + migrateDistribution + ;; + xray) + migrationXray + ;; + esac + exit 0 +} + +# Ensures meta data is logged +LOG_BEHAVIOR_ADD_META="$FLAG_Y" + + +migrateResolveDerbyPath () { + local key="$1" + local value="$2" + + if [[ "${key}" == "url" && "${value}" == *"db.home"* ]]; then + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + derbyPath="/opt/jfrog/artifactory/var/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + else + derbyPath="${NEW_DATA_DIR}/data/artifactory/derby" + value=$(echo "${value}" | sed "s|{db.home}|$derbyPath|") + fi + fi + echo "${value}" +} + +migrateResolveHaDirPath () { + local key="$1" + local value="$2" + + if [[ "${INSTALLER}" == "${RPM_TYPE}" || "${INSTALLER}" == "${COMPOSE_TYPE}" || "${INSTALLER}" == "${HELM_TYPE}" || "${INSTALLER}" == "${DEB_TYPE}" ]]; then + if [[ "${key}" == "artifactory.ha.data.dir" || "${key}" == "artifactory.ha.backup.dir" ]]; then + value=$(checkPathResolver "${value}") + fi + fi + echo "${value}" +} +updatePostgresUrlString_Hook () { + local yamlPath="$1" + local value="$2" + local hostIp=$(io_getPublicHostIP) + local sourceKey="//postgresql:" + if [[ "${yamlPath}" == "shared.database.url" ]]; then + value=$(io_replaceString "${value}" "${sourceKey}" "//${hostIp}:" "#") + fi + echo "${value}" +} +# Check Artifactory product version +checkArtifactoryVersion () { + local minProductVersion="6.0.0" + 
local maxProductVersion="7.0.0" + local propertyInDocker="ARTIFACTORY_VERSION" + local property="artifactory.version" + + if [[ "${INSTALLER}" == "${COMPOSE_TYPE}" ]]; then + local newfilePath="${APP_DIR}/../.env" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${HELM_TYPE}" ]]; then + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + elif [[ "${INSTALLER}" == "${ZIP_TYPE}" ]]; then + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="${OLD_DATA_DIR}/etc/artifactory.properties" + else + local newfilePath="${NEW_DATA_DIR}/etc/artifactory/artifactory.properties" + local oldfilePath="/etc/opt/jfrog/artifactory/artifactory.properties" + fi + + getProductVersion "${minProductVersion}" "${maxProductVersion}" "${newfilePath}" "${oldfilePath}" "${propertyInDocker}" "${property}" +} + +getCustomDataDir_hook () { + retrieveYamlValue "migration.oldDataDir" "oldDataDir" "Fail" + OLD_DATA_DIR="${YAML_VALUE}" +} + +# Get protocol value of connector +getXmlConnectorProtocol () { + local i="$1" + local filePath="$2" + local fileName="$3" + local protocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@protocol' ${filePath}/${fileName} 2>/dev/null |awk -F"=" '{print $2}' | tr -d '"') + echo -e "${protocolValue}" +} + +# Get all attributes of connector +getXmlConnectorAttributes () { + local i="$1" + local filePath="$2" + local fileName="$3" + local connectorAttributes=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@*' ${filePath}/${fileName} 2>/dev/null) + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + echo "${connectorAttributes}" +} + +# Get port value of connector +getXmlConnectorPort () { + local i="$1" + local filePath="$2" + local fileName="$3" + local portValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@port' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${portValue}" +} + +# Get maxThreads value of connector +getXmlConnectorMaxThreads () { + local i="$1" + local filePath="$2" + local fileName="$3" + local maxThreadValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@maxThreads' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${maxThreadValue}" +} +# Get sendReasonPhrase value of connector +getXmlConnectorSendReasonPhrase () { + local i="$1" + local filePath="$2" + local fileName="$3" + local sendReasonPhraseValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sendReasonPhrase' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + echo -e "${sendReasonPhraseValue}" +} +# Get relaxedPathChars value of connector +getXmlConnectorRelaxedPathChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedPathCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedPathChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + relaxedPathCharsValue=$(io_trim "${relaxedPathCharsValue}") + echo -e "${relaxedPathCharsValue}" +} +# Get relaxedQueryChars value of connector +getXmlConnectorRelaxedQueryChars () { + local i="$1" + local filePath="$2" + local fileName="$3" + local relaxedQueryCharsValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@relaxedQueryChars' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + # strip leading and trailing spaces + 
relaxedQueryCharsValue=$(io_trim "${relaxedQueryCharsValue}") + echo -e "${relaxedQueryCharsValue}" +} + +# Updating system.yaml with Connector port +setConnectorPort () { + local yamlPath="$1" + local valuePort="$2" + local portYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${valuePort}" ]]; then + warn "port value is empty, could not migrate to system.yaml" + return + fi + ## Getting port yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" portYamlPath "Warning" + portYamlPath="${YAML_VALUE}" + if [[ -z "${portYamlPath}" ]]; then + return + fi + setSystemValue "${portYamlPath}" "${valuePort}" "${SYSTEM_YAML_PATH}" + logger "Setting [${portYamlPath}] with value [${valuePort}] in system.yaml" +} + +# Updating system.yaml with Connector maxThreads +setConnectorMaxThread () { + local yamlPath="$1" + local threadValue="$2" + local maxThreadYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${threadValue}" ]]; then + return + fi + ## Getting max Threads yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" maxThreadYamlPath "Warning" + maxThreadYamlPath="${YAML_VALUE}" + if [[ -z "${maxThreadYamlPath}" ]]; then + return + fi + setSystemValue "${maxThreadYamlPath}" "${threadValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${maxThreadYamlPath}] with value [${threadValue}] in system.yaml" +} + +# Updating system.yaml with Connector sendReasonPhrase +setConnectorSendReasonPhrase () { + local yamlPath="$1" + local sendReasonPhraseValue="$2" + local sendReasonPhraseYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${sendReasonPhraseValue}" ]]; then + return + fi + ## Getting sendReasonPhrase yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" sendReasonPhraseYamlPath "Warning" + sendReasonPhraseYamlPath="${YAML_VALUE}" + if [[ -z "${sendReasonPhraseYamlPath}" ]]; then + return + fi + setSystemValue "${sendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${sendReasonPhraseYamlPath}] with value [${sendReasonPhraseValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedPathChars +setConnectorRelaxedPathChars () { + local yamlPath="$1" + local relaxedPathCharsValue="$2" + local relaxedPathCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedPathCharsValue}" ]]; then + return + fi + ## Getting relaxedPathChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedPathCharsYamlPath "Warning" + relaxedPathCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedPathCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedPathCharsYamlPath}" "${relaxedPathCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedPathCharsYamlPath}] with value [${relaxedPathCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connector relaxedQueryChars +setConnectorRelaxedQueryChars () { + local yamlPath="$1" + local relaxedQueryCharsValue="$2" + local relaxedQueryCharsYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${relaxedQueryCharsValue}" ]]; then + return + fi + ## Getting relaxedQueryChars yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" relaxedQueryCharsYamlPath "Warning" + relaxedQueryCharsYamlPath="${YAML_VALUE}" + if [[ -z "${relaxedQueryCharsYamlPath}" ]]; then + return + fi + setSystemValue "${relaxedQueryCharsYamlPath}" "${relaxedQueryCharsValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${relaxedQueryCharsYamlPath}] with value 
[${relaxedQueryCharsValue}] in system.yaml" +} + +# Updating system.yaml with Connectors configurations +setConnectorExtraConfig () { + local yamlPath="$1" + local connectorAttributes="$2" + local extraConfigPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${connectorAttributes}" ]]; then + return + fi + ## Getting extraConfig yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConfig "Warning" + extraConfigPath="${YAML_VALUE}" + if [[ -z "${extraConfigPath}" ]]; then + return + fi + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setSystemValue "${extraConfigPath}" "${connectorAttributes}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConfigPath}] with connector attributes in system.yaml" +} + +# Updating system.yaml with extra Connectors +setExtraConnector () { + local yamlPath="$1" + local extraConnector="$2" + local extraConnectorYamlPath= + if [[ -z "${yamlPath}" ]]; then + return + fi + if [[ -z "${extraConnector}" ]]; then + return + fi + ## Getting extraConnecotr yaml path from migration info yaml + retrieveYamlValue "${yamlPath}" extraConnectorYamlPath "Warning" + extraConnectorYamlPath="${YAML_VALUE}" + if [[ -z "${extraConnectorYamlPath}" ]]; then + return + fi + getYamlValue "${extraConnectorYamlPath}" "${SYSTEM_YAML_PATH}" "false" + local connectorExtra="${YAML_VALUE}" + if [[ -z "${connectorExtra}" ]]; then + setSystemValue "${extraConnectorYamlPath}" "${extraConnector}" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + else + setSystemValue "${extraConnectorYamlPath}" "\"${connectorExtra} ${extraConnector}\"" "${SYSTEM_YAML_PATH}" + logger "Setting [${extraConnectorYamlPath}] with extra connectors in system.yaml" + fi +} + +# Migrate extra connectors to system.yaml +migrateExtraConnectors () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local excludeDefaultPort="$4" + local i="$5" + local extraConfig= + local extraConnector= + if [[ "${excludeDefaultPort}" == "yes" ]]; then + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" && "${portValue}" != "${DEFAULT_RT_PORT}" ]] || continue + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + done + else + extraConnector=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']' ${filePath}/${fileName} 2>/dev/null) + setExtraConnector "${EXTRA_CONFIG_YAMLPATH}" "${extraConnector}" + fi +} + +# Migrate connector configurations +migrateConnectorConfig () { + local i="$1" + local protocolType="$2" + local portValue="$3" + local connectorPortYamlPath="$4" + local connectorMaxThreadYamlPath="$5" + local connectorAttributesYamlPath="$6" + local filePath="$7" + local fileName="$8" + local connectorSendReasonPhraseYamlPath="$9" + local connectorRelaxedPathCharsYamlPath="${10}" + local connectorRelaxedQueryCharsYamlPath="${11}" + + # migrate port + setConnectorPort "${connectorPortYamlPath}" "${portValue}" + + # migrate maxThreads + local maxThreadValue=$(getXmlConnectorMaxThreads "$i" "${filePath}" "${fileName}") + setConnectorMaxThread "${connectorMaxThreadYamlPath}" "${maxThreadValue}" + + # migrate sendReasonPhrase + local sendReasonPhraseValue=$(getXmlConnectorSendReasonPhrase "$i" "${filePath}" "${fileName}") + 
setConnectorSendReasonPhrase "${connectorSendReasonPhraseYamlPath}" "${sendReasonPhraseValue}" + + # migrate relaxedPathChars + local relaxedPathCharsValue=$(getXmlConnectorRelaxedPathChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedPathChars "${connectorRelaxedPathCharsYamlPath}" "\"${relaxedPathCharsValue}\"" + # migrate relaxedQueryChars + local relaxedQueryCharsValue=$(getXmlConnectorRelaxedQueryChars "$i" "${filePath}" "${fileName}") + setConnectorRelaxedQueryChars "${connectorRelaxedQueryCharsYamlPath}" "\"${relaxedQueryCharsValue}\"" + + # migrate all attributes to extra config except port , maxThread , sendReasonPhrase ,relaxedPathChars and relaxedQueryChars + local connectorAttributes=$(getXmlConnectorAttributes "$i" "${filePath}" "${fileName}") + connectorAttributes=$(echo "${connectorAttributes}" | sed 's/port="'${portValue}'"//g' | sed 's/maxThreads="'${maxThreadValue}'"//g' | sed 's/sendReasonPhrase="'${sendReasonPhraseValue}'"//g' | sed 's/relaxedPathChars="\'${relaxedPathCharsValue}'\"//g' | sed 's/relaxedQueryChars="\'${relaxedQueryCharsValue}'\"//g') + # strip leading and trailing spaces + connectorAttributes=$(io_trim "${connectorAttributes}") + setConnectorExtraConfig "${connectorAttributesYamlPath}" "${connectorAttributes}" +} + +# Check for default port 8040 and 8081 in connectors and migrate +migrateConnectorPort () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + local connectorPortYamlPath="$5" + local connectorMaxThreadYamlPath="$6" + local connectorAttributesYamlPath="$7" + local connectorSendReasonPhraseYamlPath="$8" + local connectorRelaxedPathCharsYamlPath="$9" + local connectorRelaxedQueryCharsYamlPath="${10}" + local portYamlPath= + local maxThreadYamlPath= + local status= + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" == *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + RT_DEFAULTPORT_STATUS=success + else + AC_DEFAULTPORT_STATUS=success + fi + migrateConnectorConfig "${i}" "${protocolType}" "${portValue}" "${connectorPortYamlPath}" "${connectorMaxThreadYamlPath}" "${connectorAttributesYamlPath}" "${filePath}" "${fileName}" "${connectorSendReasonPhraseYamlPath}" "${connectorRelaxedPathCharsYamlPath}" "${connectorRelaxedQueryCharsYamlPath}" + done +} + +# migrate to extra, connector having default port and protocol is AJP +migrateDefaultPortIfAjp () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local defaultPort="$4" + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] && continue + [[ "${portValue}" != "${defaultPort}" ]] && continue + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + done + +} + +# Comparing max threads in connectors +compareMaxThreads () { + local firstConnectorMaxThread="$1" + local firstConnectorNode="$2" + local secondConnectorMaxThread="$3" + local secondConnectorNode="$4" + local filePath="$5" + local fileName="$6" + + # choose higher maxThreads connector as Artifactory. 
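+ # Worked example (illustrative, values are hypothetical): given connector A with
+ # maxThreads="200" and connector B with maxThreads="50", A is migrated to the
+ # artifactory.* yaml paths and B to the access.* yaml paths; when both values are
+ # equal, the first connector is treated as Artifactory and the second as Access,
+ # matching the ">= " comparison in the condition below.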
+ if [[ "${firstConnectorMaxThread}" -gt ${secondConnectorMaxThread} || "${firstConnectorMaxThread}" -eq ${secondConnectorMaxThread} ]]; then + # maxThread is higher in firstConnector, + # Taking firstConnector as Artifactory and SecondConnector as Access + # maxThread is equal in both connector,considering firstConnector as Artifactory and SecondConnector as Access + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + else + # maxThread is higher in SecondConnector, + # Taking SecondConnector as Artifactory and firstConnector as Access + local rtPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +# Check max threads exist to compare +maxThreadsExistToCompare () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local firstConnectorMaxThread= + local secondConnectorMaxThread= + local firstConnectorNode= + local secondConnectorNode= + local status=success + local firstnode=fail + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ ${protocolType} == *AJP* ]]; then + # Migrate Connectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + fi + # store maxthreads value of each connector + if [[ ${firstnode} == "fail" ]]; then + firstConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + firstConnectorNode="${i}" + firstnode=success + else + secondConnectorMaxThread=$(getXmlConnectorMaxThreads "${i}" "${filePath}" "${fileName}") + secondConnectorNode="${i}" + fi + done + [[ -z "${firstConnectorMaxThread}" ]] && status=fail + [[ -z "${secondConnectorMaxThread}" ]] && status=fail + # maxThreads is set, now compare MaxThreads + if [[ "${status}" == "success" ]]; then + compareMaxThreads "${firstConnectorMaxThread}" "${firstConnectorNode}" "${secondConnectorMaxThread}" "${secondConnectorNode}" "${filePath}" "${fileName}" + else + # Assume first connector is RT, maxThreads is not set in both connectors + local rtPortValue=$(getXmlConnectorPort "${firstConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${firstConnectorNode}" "${protocolType}" "${rtPortValue}" "${RT_PORT_YAMLPATH}" 
"${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + local acPortValue=$(getXmlConnectorPort "${secondConnectorNode}" "${filePath}" "${fileName}") + migrateConnectorConfig "${secondConnectorNode}" "${protocolType}" "${acPortValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi +} + +migrateExtraBasedOnNonAjpCount () { + local nonAjpCount="$1" + local filePath="$2" + local fileName="$3" + local connectorCount="$4" + local i="$5" + + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + if [[ "${protocolType}" == *AJP* ]]; then + if [[ "${nonAjpCount}" -eq 1 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "no" "${i}" + continue + else + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + continue + fi + fi +} + +# find RT and AC Connector +findRtAndAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local initialAjpCount=0 + local nonAjpCount=0 + + # get the count of non AJP + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" "${filePath}" "${fileName}") + [[ "${protocolType}" != *AJP* ]] || continue + nonAjpCount=$((initialAjpCount+1)) + initialAjpCount="${nonAjpCount}" + done + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access and artifactory connectors + # Mark port as 8040 for access + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + done + elif [[ "${nonAjpCount}" -eq 2 ]]; then + # compare maxThreads in both connectors + maxThreadsExistToCompare "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${nonAjpCount}" -gt 2 ]]; then + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # setting with default port in system.yaml + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# get the count of non AJP +getCountOfNonAjp () { + local port="$1" + local connectorCount="$2" + local filePath=$3 + local fileName=$4 + local initialNonAjpCount=0 + + for ((i = 1 ; i <= "${connectorCount}" ; i++)); + do + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + local protocolType=$(getXmlConnectorProtocol "$i" 
"${filePath}" "${fileName}") + [[ "${portValue}" != "${port}" ]] || continue + [[ "${protocolType}" != *AJP* ]] || continue + local nonAjpCount=$((initialNonAjpCount+1)) + initialNonAjpCount="${nonAjpCount}" + done + echo -e "${nonAjpCount}" +} + +# Find for access connector +findAcConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_RT_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as access connector and mark port as that of connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take RT properties into access with 8040 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" == "${DEFAULT_RT_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add RT connector details as access connector and mark port as 8040 + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + setConnectorPort "${AC_PORT_YAMLPATH}" "${DEFAULT_ACCESS_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +# Find for artifactory connector +findRtConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + + # get the count of non AJP + local nonAjpCount=$(getCountOfNonAjp "${DEFAULT_ACCESS_PORT}" "${connectorCount}" "${filePath}" "${fileName}") + if [[ "${nonAjpCount}" -eq 1 ]]; then + # Add the connector found as RT connector + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + if [[ "${portValue}" != "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + fi + done + elif [[ "${nonAjpCount}" -gt 1 ]]; then + # Take access properties into artifactory with 8081 + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + migrateExtraBasedOnNonAjpCount "${nonAjpCount}" "${filePath}" "${fileName}" "${connectorCount}" "$i" + local portValue=$(getXmlConnectorPort "$i" "${filePath}" "${fileName}") + 
if [[ "${portValue}" == "${DEFAULT_ACCESS_PORT}" ]]; then + migrateConnectorConfig "$i" "${protocolType}" "${portValue}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${filePath}" "${fileName}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + fi + done + elif [[ "${nonAjpCount}" -eq 0 ]]; then + # Add access connector details as RT connector and mark as ${DEFAULT_RT_PORT} + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" "${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + setConnectorPort "${RT_PORT_YAMLPATH}" "${DEFAULT_RT_PORT}" + # migrateExtraConnectors + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + fi +} + +checkForTlsConnector () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + for ((i = 1 ; i <= "${connectorCount}" ; i++)) + do + local sslProtocolValue=$($LIBXML2_PATH --xpath '//Server/Service/Connector['$i']/@sslProtocol' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${sslProtocolValue}" == "TLS" ]]; then + bannerImportant "NOTE: Ignoring TLS connector during migration, modify the system yaml to enable TLS. Original server.xml is saved in path [${filePath}/${fileName}]" + TLS_CONNECTOR_EXISTS=${FLAG_Y} + continue + fi + done +} + +# set custom tomcat server Listeners to system.yaml +setListenerConnector () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + for ((i = 1 ; i <= "${listenerCount}" ; i++)) + do + local listenerConnector=$($LIBXML2_PATH --xpath '//Server/Listener['$i']' ${filePath}/${fileName} 2>/dev/null) + local listenerClassName=$($LIBXML2_PATH --xpath '//Server/Listener['$i']/@className' ${filePath}/${fileName} 2>/dev/null | awk -F"=" '{print $2}' | tr -d '"') + if [[ "${listenerClassName}" == *Apr* ]]; then + setExtraConnector "${EXTRA_LISTENER_CONFIG_YAMLPATH}" "${listenerConnector}" + fi + done +} +# add custom tomcat server Listeners +addTomcatServerListeners () { + local filePath="$1" + local fileName="$2" + local listenerCount="$3" + if [[ "${listenerCount}" == "0" ]]; then + logger "No listener connectors found in the [${filePath}/${fileName}],skipping migration of listener connectors" + else + setListenerConnector "${filePath}" "${fileName}" "${listenerCount}" + setSystemValue "${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}" "true" "${SYSTEM_YAML_PATH}" + logger "Setting [${RT_TOMCAT_HTTPSCONNECTOR_ENABLED}] with value [true] in system.yaml" + fi +} + +# server.xml migration operations +xmlMigrateOperation () { + local filePath="$1" + local fileName="$2" + local connectorCount="$3" + local listenerCount="$4" + RT_DEFAULTPORT_STATUS=fail + AC_DEFAULTPORT_STATUS=fail + TLS_CONNECTOR_EXISTS=${FLAG_N} + + # Check for connector with TLS , if found ignore migrating it + checkForTlsConnector "${filePath}" "${fileName}" "${connectorCount}" + if [[ "${TLS_CONNECTOR_EXISTS}" == "${FLAG_Y}" ]]; then + return + fi + addTomcatServerListeners "${filePath}" "${fileName}" "${listenerCount}" + # Migrate RT default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" "${RT_PORT_YAMLPATH}" "${RT_MAXTHREADS_YAMLPATH}" "${RT_EXTRACONFIG_YAMLPATH}" "${RT_SENDREASONPHRASE_YAMLPATH}" 
"${RT_RELAXEDPATHCHARS_YAMLPATH}" "${RT_RELAXEDQUERYCHARS_YAMLPATH}" + # Migrate to extra if RT default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_RT_PORT}" + # Migrate AC default port from connectors + migrateConnectorPort "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" "${AC_PORT_YAMLPATH}" "${AC_MAXTHREADS_YAMLPATH}" "${AC_EXTRACONFIG_YAMLPATH}" "${AC_SENDREASONPHRASE_YAMLPATH}" + # Migrate to extra if access default ports are AJP + migrateDefaultPortIfAjp "${filePath}" "${fileName}" "${connectorCount}" "${DEFAULT_ACCESS_PORT}" + + if [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # RT and AC default port found + logger "Artifactory 8081 and Access 8040 default port are found" + migrateExtraConnectors "${filePath}" "${fileName}" "${connectorCount}" "yes" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "success" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # Only AC default port found,find RT connector + logger "Found Access default 8040 port" + findRtConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "success" ]]; then + # Only RT default port found,find AC connector + logger "Found Artifactory default 8081 port" + findAcConnector "${filePath}" "${fileName}" "${connectorCount}" + elif [[ "${AC_DEFAULTPORT_STATUS}" == "fail" && "${RT_DEFAULTPORT_STATUS}" == "fail" ]]; then + # RT and AC default port not found, find connector + logger "Artifactory 8081 and Access 8040 default port are not found" + findRtAndAcConnector "${filePath}" "${fileName}" "${connectorCount}" + fi +} + +# get count of connectors +getXmlConnectorCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Service/Connector)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# get count of listener connectors +getTomcatServerListenersCount () { + local filePath="$1" + local fileName="$2" + local count=$($LIBXML2_PATH --xpath 'count(/Server/Listener)' ${filePath}/${fileName}) + echo -e "${count}" +} + +# Migrate server.xml configuration to system.yaml +migrateXmlFile () { + local xmlFiles= + local fileName= + local filePath= + local sourceFilePath= + DEFAULT_ACCESS_PORT="8040" + DEFAULT_RT_PORT="8081" + AC_PORT_YAMLPATH="migration.xmlFiles.serverXml.access.port" + AC_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.access.maxThreads" + AC_SENDREASONPHRASE_YAMLPATH="migration.xmlFiles.serverXml.access.sendReasonPhrase" + AC_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.access.extraConfig" + RT_PORT_YAMLPATH="migration.xmlFiles.serverXml.artifactory.port" + RT_MAXTHREADS_YAMLPATH="migration.xmlFiles.serverXml.artifactory.maxThreads" + RT_SENDREASONPHRASE_YAMLPATH='migration.xmlFiles.serverXml.artifactory.sendReasonPhrase' + RT_RELAXEDPATHCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedPathChars' + RT_RELAXEDQUERYCHARS_YAMLPATH='migration.xmlFiles.serverXml.artifactory.relaxedQueryChars' + RT_EXTRACONFIG_YAMLPATH="migration.xmlFiles.serverXml.artifactory.extraConfig" + ROUTER_PORT_YAMLPATH="migration.xmlFiles.serverXml.router.port" + EXTRA_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.config" + EXTRA_LISTENER_CONFIG_YAMLPATH="migration.xmlFiles.serverXml.extra.listener" + RT_TOMCAT_HTTPSCONNECTOR_ENABLED="artifactory.tomcat.httpsConnector.enabled" + + retrieveYamlValue "migration.xmlFiles" "xmlFiles" "Skip" + xmlFiles="${YAML_VALUE}" 
+ if [[ -z "${xmlFiles}" ]]; then + return + fi + bannerSection "PROCESSING MIGRATION OF XML FILES" + retrieveYamlValue "migration.xmlFiles.serverXml.fileName" "fileName" "Warning" + fileName="${YAML_VALUE}" + if [[ -z "${fileName}" ]]; then + return + fi + bannerSubSection "Processing Migration of $fileName" + retrieveYamlValue "migration.xmlFiles.serverXml.filePath" "filePath" "Warning" + filePath="${YAML_VALUE}" + if [[ -z "${filePath}" ]]; then + return + fi + # prepend NEW_DATA_DIR only if filePath is relative path + sourceFilePath=$(prependDir "${filePath}" "${NEW_DATA_DIR}/${filePath}") + if [[ "$(checkFileExists "${sourceFilePath}/${fileName}")" == "true" ]]; then + logger "File [${fileName}] is found in path [${sourceFilePath}]" + local connectorCount=$(getXmlConnectorCount "${sourceFilePath}" "${fileName}") + if [[ "${connectorCount}" == "0" ]]; then + logger "No connectors found in the [${filePath}/${fileName}],skipping migration of xml configuration" + return + fi + local listenerCount=$(getTomcatServerListenersCount "${sourceFilePath}" "${fileName}") + xmlMigrateOperation "${sourceFilePath}" "${fileName}" "${connectorCount}" "${listenerCount}" + else + logger "File [${fileName}] is not found in path [${sourceFilePath}] to migrate" + fi +} + +compareArtifactoryUser () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + + if [[ "${oldPropertyValue}" != "${newPropertyValue}" ]]; then + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" + else + logger "No change in property [${property}] value in [${sourceFile}] to migrate" + fi +} + +migrateReplicator () { + local property="$1" + local oldPropertyValue="$2" + local yamlPath="$3" + + setSystemValue "${yamlPath}" "${oldPropertyValue}" "${SYSTEM_YAML_PATH}" + logger "Setting [${yamlPath}] with value of the property [${property}] in system.yaml" +} + +compareJavaOptions () { + local property="$1" + local oldPropertyValue="$2" + local newPropertyValue="$3" + local yamlPath="$4" + local sourceFile="$5" + local oldJavaOption= + local newJavaOption= + local extraJavaOption= + local check=false + local success=true + local status=true + + oldJavaOption=$(echo "${oldPropertyValue}" | awk 'BEGIN{FS=OFS="\""}{for(i=2;i= 10.1.0" + ## Description / note + description: This CVE needs to be fixed in the alpine base image of nginx container. diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/NOTES.txt b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/NOTES.txt new file mode 100644 index 000000000..5afe10d1d --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/NOTES.txt @@ -0,0 +1,85 @@ +Congratulations. You have just deployed JFrog Artifactory! 
+{{- if .Values.artifactory.masterKey }} +{{- if and (not .Values.artifactory.masterKeySecretName) (eq .Values.artifactory.masterKey "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") }} + + +***************************************** WARNING ****************************************** +* Your Artifactory master key is still set to the provided example: * +* artifactory.masterKey=FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF * +* * +* You should change this to your own generated key: * +* $ export MASTER_KEY=$(openssl rand -hex 32) * +* $ echo ${MASTER_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.masterKey=${MASTER_KEY}' * +* * +* Alternatively, you can use a pre-existing secret with a key called master-key with * +* '--set artifactory.masterKeySecretName=${SECRET_NAME}' * +******************************************************************************************** +{{- end }} +{{- end }} + +{{- if .Values.artifactory.joinKey }} +{{- if eq .Values.artifactory.joinKey "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" }} + + +***************************************** WARNING ****************************************** +* Your Artifactory join key is still set to the provided example: * +* artifactory.joinKey=EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE * +* * +* You should change this to your own generated key: * +* $ export JOIN_KEY=$(openssl rand -hex 32) * +* $ echo ${JOIN_KEY} * +* * +* Pass the created master key to helm with '--set artifactory.joinKey=${JOIN_KEY}' * +* * +******************************************************************************************** +{{- end }} +{{- end }} + +1. Get the Artifactory URL by running these commands: + + {{- if .Values.ingress.enabled }} + {{- range .Values.ingress.hosts }} + http://{{ . }} + {{- end }} + + {{- else if contains "NodePort" .Values.nginx.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "artifactory.nginx.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + + {{- else if contains "LoadBalancer" .Values.nginx.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of the service by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "artifactory.nginx.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory.nginx.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ + + {{- else if contains "ClusterIP" .Values.nginx.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "component={{ .Values.nginx.name }}" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:{{ .Values.nginx.externalPortHttp }} + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME {{ .Values.nginx.externalPortHttp }}:{{ .Values.nginx.internalPortHttp }} + + {{- end }} + +2. Open Artifactory in your browser + Default credential for Artifactory: + user: admin + password: password + +{{ if .Values.artifactory.javaOpts.jmx.enabled }} +JMX configuration: +{{- if not (contains "LoadBalancer" .Values.artifactory.service.type) }} +If you want to access JMX from you computer with jconsole, you should set ".Values.artifactory.service.type=LoadBalancer" !!! 
+{{ end }} + +1. Get the Artifactory service IP: +export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "artifactory.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + +2. Map the service name to the service IP in /etc/hosts: +sudo sh -c "echo \"${SERVICE_IP} {{ template "artifactory.fullname" . }}\" >> /etc/hosts" + +3. Launch jconsole: +jconsole {{ template "artifactory.fullname" . }}:{{ .Values.artifactory.javaOpts.jmx.port }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/_helpers.tpl b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/_helpers.tpl new file mode 100644 index 000000000..14b571f91 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/_helpers.tpl @@ -0,0 +1,237 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "artifactory.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name nginx service. +*/}} +{{- define "artifactory.nginx.name" -}} +{{- default .Chart.Name .Values.nginx.name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "artifactory.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified replicator app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "artifactory.replicator.fullname" -}} +{{- if .Values.artifactory.replicator.ingress.name -}} +{{- .Values.artifactory.replicator.ingress.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-replication" .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified nginx name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "artifactory.nginx.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nginx.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "artifactory.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "artifactory.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "artifactory.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate SSL certificates +*/}} +{{- define "artifactory.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "artifactory.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "artifactory.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "artifactory-ca" 365 -}} +{{- $cert := genSignedCert ( include "artifactory.name" . 
) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Scheme (http/https) based on Access TLS enabled/disabled +*/}} +{{- define "artifactory.scheme" -}} +{{- if .Values.access.accessConfig.security.tls -}} +{{- printf "%s" "https" -}} +{{- else -}} +{{- printf "%s" "http" -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKey value +*/}} +{{- define "artifactory.joinKey" -}} +{{- if .Values.global.joinKey -}} +{{- .Values.global.joinKey -}} +{{- else if .Values.artifactory.joinKey -}} +{{- .Values.artifactory.joinKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKey value +*/}} +{{- define "artifactory.masterKey" -}} +{{- if .Values.global.masterKey -}} +{{- .Values.global.masterKey -}} +{{- else if .Values.artifactory.masterKey -}} +{{- .Values.artifactory.masterKey -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve joinKeySecretName value +*/}} +{{- define "artifactory.joinKeySecretName" -}} +{{- if .Values.global.joinKeySecretName -}} +{{- .Values.global.joinKeySecretName -}} +{{- else if .Values.artifactory.joinKeySecretName -}} +{{- .Values.artifactory.joinKeySecretName -}} +{{- else -}} +{{ include "artifactory.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve masterKeySecretName value +*/}} +{{- define "artifactory.masterKeySecretName" -}} +{{- if .Values.global.masterKeySecretName -}} +{{- .Values.global.masterKeySecretName -}} +{{- else if .Values.artifactory.masterKeySecretName -}} +{{- .Values.artifactory.masterKeySecretName -}} +{{- else -}} +{{ include "artifactory.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Resolve imagePullSecrets value +*/}} +{{- define "artifactory.imagePullSecrets" -}} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainersBegin value +*/}} +{{- define "artifactory.customInitContainersBegin" -}} +{{- if .Values.global.customInitContainersBegin -}} +{{- .Values.global.customInitContainersBegin -}} +{{- else if .Values.artifactory.customInitContainersBegin -}} +{{- .Values.artifactory.customInitContainersBegin -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customInitContainers value +*/}} +{{- define "artifactory.customInitContainers" -}} +{{- if .Values.global.customInitContainers -}} +{{- .Values.global.customInitContainers -}} +{{- else if .Values.artifactory.customInitContainers -}} +{{- .Values.artifactory.customInitContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumes value +*/}} +{{- define "artifactory.customVolumes" -}} +{{- if .Values.global.customVolumes -}} +{{- .Values.global.customVolumes -}} +{{- else if .Values.artifactory.customVolumes -}} +{{- .Values.artifactory.customVolumes -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customVolumeMounts value +*/}} +{{- define "artifactory.customVolumeMounts" -}} +{{- if .Values.global.customVolumeMounts -}} +{{- .Values.global.customVolumeMounts -}} +{{- else if .Values.artifactory.customVolumeMounts -}} +{{- .Values.artifactory.customVolumeMounts -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve customSidecarContainers value +*/}} +{{- define "artifactory.customSidecarContainers" -}} +{{- if .Values.global.customSidecarContainers -}} +{{- .Values.global.customSidecarContainers -}} +{{- else if .Values.artifactory.customSidecarContainers -}} +{{- .Values.artifactory.customSidecarContainers -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory chart image names +*/}} +{{- define "artifactory.getImageInfoByValue" -}} +{{- $dot := index . 0 }} +{{- $indexReference := index . 1 }} +{{- $registryName := index $dot.Values $indexReference "image" "registry" -}} +{{- $repositoryName := index $dot.Values $indexReference "image" "repository" -}} +{{- $tag := default $dot.Chart.AppVersion (index $dot.Values $indexReference "image" "tag") | toString -}} +{{- if $dot.Values.global }} + {{- if and $dot.Values.global.versions.artifactory (or (eq $indexReference "artifactory") (eq $indexReference "nginx") ) }} + {{- $tag = $dot.Values.global.versions.artifactory | toString -}} + {{- end -}} + {{- if $dot.Values.global.imageRegistry }} + {{- printf "%s/%s:%s" $dot.Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper artifactory app version +*/}} +{{- define "artifactory.app.version" -}} +{{- $image := split ":" ((include "artifactory.getImageInfoByValue" (list . "artifactory")) | toString) -}} +{{- $tag := $image._1 -}} +{{- printf "%s" $tag -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/additional-resources.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/additional-resources.yaml new file mode 100644 index 000000000..c4d06f08a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/additional-resources.yaml @@ -0,0 +1,3 @@ +{{ if .Values.additionalResources }} +{{ tpl .Values.additionalResources . 
}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/admin-bootstrap-creds.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/admin-bootstrap-creds.yaml new file mode 100644 index 000000000..c8bf2eb16 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/admin-bootstrap-creds.yaml @@ -0,0 +1,15 @@ +{{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} +{{- if .Values.artifactory.admin.password }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-bootstrap-creds + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + bootstrap.creds: {{ (printf "%s@%s=%s" .Values.artifactory.admin.username .Values.artifactory.admin.ip .Values.artifactory.admin.password) | b64enc }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-access-config.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-access-config.yaml new file mode 100644 index 000000000..304304e4e --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-access-config.yaml @@ -0,0 +1,15 @@ +{{- if .Values.access.accessConfig }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-access-config + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + access.config.patch.yml: | +{{ tpl (toYaml .Values.access.accessConfig) . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-binarystore-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-binarystore-secret.yaml new file mode 100644 index 000000000..bbfbdf3be --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-binarystore-secret.yaml @@ -0,0 +1,14 @@ +{{- if not .Values.artifactory.persistence.customBinarystoreXmlSecret }} +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-binarystore + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +stringData: + binarystore.xml: |- +{{ tpl .Values.artifactory.persistence.binarystoreXml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-configmaps.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-configmaps.yaml new file mode 100644 index 000000000..359fa07d2 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-configmaps.yaml @@ -0,0 +1,13 @@ +{{ if .Values.artifactory.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-configmaps + labels: + app: {{ template "artifactory.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.artifactory.configMaps . 
| indent 2 }} +{{ end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-custom-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-custom-secrets.yaml new file mode 100644 index 000000000..a2e3895f5 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if .Values.artifactory.customSecrets }} +{{- range .Values.artifactory.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" $ }}-{{ .name }} + labels: + app: "{{ template "artifactory.name" $ }}" + chart: "{{ template "artifactory.chart" $ }}" + component: "{{ $.Values.artifactory.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-database-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-database-secrets.yaml new file mode 100644 index 000000000..9edf87dc1 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-database-secrets.yaml @@ -0,0 +1,22 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-database-creds + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-installer-info.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-installer-info.yaml new file mode 100644 index 000000000..f2e2c0f5b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-installer-info.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "artifactory.fullname" . }}-installer-info + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + installer-info.json: | + {{ tpl .Values.installerInfo . 
}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-license-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-license-secret.yaml new file mode 100644 index 000000000..d37ed1e4b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-license-secret.yaml @@ -0,0 +1,14 @@ +{{- with .Values.artifactory.license.licenseKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" $ }}-license + labels: + app: {{ template "artifactory.name" $ }} + chart: {{ template "artifactory.chart" $ }} + heritage: {{ $.Release.Service }} + release: {{ $.Release.Name }} +type: Opaque +data: + artifactory.lic: {{ . | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-migration-scripts.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-migration-scripts.yaml new file mode 100644 index 000000000..4b1ba4027 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-migration-scripts.yaml @@ -0,0 +1,18 @@ +{{- if .Values.artifactory.migration.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-migration-scripts + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + migrate.sh: | +{{ .Files.Get "files/migrate.sh" | indent 4 }} + migrationHelmInfo.yaml: | +{{ .Files.Get "files/migrationHelmInfo.yaml" | indent 4 }} + migrationStatus.sh: | +{{ .Files.Get "files/migrationStatus.sh" | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-networkpolicy.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-networkpolicy.yaml new file mode 100644 index 000000000..da6fd178a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "artifactory.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "artifactory.name" $ }} + chart: {{ template "artifactory.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-priority-class.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-priority-class.yaml new file mode 100644 index 000000000..9eb94b35b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-priority-class.yaml @@ -0,0 
+1,9 @@ +{{- if .Values.artifactory.priorityClass.create }} +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: {{ default (include "artifactory.fullname" .) .Values.artifactory.priorityClass.name }} +value: {{ .Values.artifactory.priorityClass.value }} +globalDefault: false +description: "Artifactory priority class" +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-role.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-role.yaml new file mode 100644 index 000000000..db01f235f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-role.yaml @@ -0,0 +1,14 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.fullname" . }} +rules: +{{ toYaml .Values.rbac.role.rules }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-rolebinding.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-rolebinding.yaml new file mode 100644 index 000000000..dfb42258f --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "artifactory.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "artifactory.fullname" . }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-secrets.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-secrets.yaml new file mode 100644 index 000000000..3ac8e3a74 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-secrets.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- if or .Values.artifactory.masterKey .Values.global.masterKey }} + {{- if not (or .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName) }} + master-key: {{ include "artifactory.masterKey" . | b64enc | quote }} + {{- end }} + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey }} + {{- if not (or .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName) }} + join-key: {{ include "artifactory.joinKey" . 
| b64enc | quote }} + {{- end }} + {{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-service.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-service.yaml new file mode 100644 index 000000000..27f552979 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-service.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- if .Values.artifactory.service.annotations }} + annotations: +{{ toYaml .Values.artifactory.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.artifactory.service.type }} +{{- if .Values.artifactory.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.artifactory.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.artifactory.externalPort }} + targetPort: {{ .Values.artifactory.internalPort }} + protocol: TCP + name: http-router + - port: {{ .Values.artifactory.externalArtifactoryPort }} + targetPort: {{ .Values.artifactory.internalArtifactoryPort }} + protocol: TCP + name: http-artifactory + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.artifactory.ssh.externalPort }} + targetPort: {{ .Values.artifactory.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + {{- with .Values.artifactory.javaOpts.jmx }} + {{- if .enabled }} + - port: {{ .port }} + targetPort: {{ .port }} + protocol: TCP + name: tcp-jmx + {{- end }} + {{- end }} + selector: + app: {{ template "artifactory.name" . }} + component: "{{ .Values.artifactory.name }}" + release: {{ .Release.Name }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-serviceaccount.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-serviceaccount.yaml new file mode 100644 index 000000000..4e9c5fbb5 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "artifactory.serviceAccountName" . }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-statefulset.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-statefulset.yaml new file mode 100644 index 000000000..524fcbcbf --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-statefulset.yaml @@ -0,0 +1,734 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "artifactory.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . 
}} + component: {{ .Values.artifactory.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- with .Values.artifactory.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/artifactory/CHANGELOG.md) \nNote: This applies only when you are using bundled postgresql (postgresql.enabled=true) \nIf you are upgrading from a chart version (< 11.x) that has postgresql.image.tag of 9.x or 10.x, make sure to pass the current postgresql.image.tag and set databaseUpgradeReady=true \nOR \nIf you are upgrading from a chart version (>= 11.x), just set databaseUpgradeReady=true \n" .Values.databaseUpgradeReady | quote }} +{{- end }} +spec: + serviceName: {{ template "artifactory.name" . }} + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "artifactory.name" . }} + role: {{ template "artifactory.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + role: {{ template "artifactory.name" . }} + component: {{ .Values.artifactory.name }} + release: {{ .Release.Name }} + {{- with .Values.artifactory.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + checksum/database-secrets: {{ include (print $.Template.BasePath "/artifactory-database-secrets.yaml") . | sha256sum }} + checksum/binarystore: {{ include (print $.Template.BasePath "/artifactory-binarystore-secret.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/artifactory-system-yaml.yaml") . | sha256sum }} + {{- if .Values.access.accessConfig }} + checksum/access-config: {{ include (print $.Template.BasePath "/artifactory-access-config.yaml") . | sha256sum }} + {{- end }} + {{- if not (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) }} + checksum/admin-creds: {{ include (print $.Template.BasePath "/admin-bootstrap-creds.yaml") . | sha256sum }} + {{- end }} + {{- range $key, $value := .Values.artifactory.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + {{- if .Values.artifactory.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.artifactory.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.artifactory.priorityClass.create }} + priorityClassName: {{ default (include "artifactory.fullname" .) .Values.artifactory.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "artifactory.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.artifactory.terminationGracePeriodSeconds }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.artifactory.setSecurityContext }} + securityContext: + runAsUser: {{ .Values.artifactory.uid }} + fsGroup: {{ .Values.artifactory.gid }} + {{- end }} + initContainers: + {{- if or .Values.artifactory.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "artifactory.customInitContainersBegin" .) . 
| indent 6 }} + {{- end }} + {{- if .Values.artifactory.persistence.enabled }} + {{- if .Values.artifactory.deleteDBPropertiesOnStartup }} + - name: "delete-db-properties" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -fv {{ .Values.artifactory.persistence.mountPath }}/etc/db.properties' + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- end }} + - name: "remove-lost-found" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - 'rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found {{ .Values.artifactory.persistence.mountPath }}/data/.lock' + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: "access-bootstrap-creds" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Preparing {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -Lrf /tmp/access/bootstrap.creds {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + chmod 600 {{ .Values.artifactory.persistence.mountPath }}/etc/access/bootstrap.creds; + volumeMounts: + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + - name: access-bootstrap-creds + mountPath: "/tmp/access/bootstrap.creds" + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + subPath: {{ .Values.artifactory.admin.dataKey }} + {{- else }} + subPath: bootstrap.creds + {{- end }} + {{- end }} + - name: 'copy-system-yaml' + image: '{{ .Values.initContainerImage }}' + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - '/bin/sh' + - '-c' + - > + echo "Copy system.yaml to {{ .Values.artifactory.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access/keys/trusted; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.artifactory.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.artifactory.persistence.mountPath }}/lost+found; + {{- if .Values.access.accessConfig }} + echo "Copy access.config.patch.yml to {{ .Values.artifactory.persistence.mountPath }}/etc/access"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/access; + cp -fv /tmp/etc/access.config.patch.yml {{ .Values.artifactory.persistence.mountPath }}/etc/access/access.config.patch.yml; + {{- end }} + {{- if .Values.access.resetAccessCAKeys }} + echo "Resetting Access CA Keys"; + mkdir -p {{ 
.Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + touch {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/reset_ca_keys; + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + echo "Copying custom certificates to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys; + cp -fv /tmp/etc/tls.crt {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.crt; + cp -fv /tmp/etc/tls.key {{ .Values.artifactory.persistence.mountPath }}/bootstrap/etc/access/keys/ca.private.key; + {{- end }} + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security; + echo -n ${ARTIFACTORY_JOIN_KEY} > {{ .Values.artifactory.persistence.mountPath }}/bootstrap/access/etc/security/join.key; + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.artifactory.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/etc/security; + echo -n ${ARTIFACTORY_MASTER_KEY} > {{ .Values.artifactory.persistence.mountPath }}/etc/security/master.key; + {{- end }} + env: + {{- if or .Values.artifactory.joinKey .Values.global.joinKey .Values.artifactory.joinKeySecretName .Values.global.joinKeySecretName }} + - name: ARTIFACTORY_JOIN_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory.joinKeySecretName" . }} + key: join-key + {{- end }} + {{- if or .Values.artifactory.masterKey .Values.global.masterKey .Values.artifactory.masterKeySecretName .Values.global.masterKeySecretName }} + - name: ARTIFACTORY_MASTER_KEY + valueFrom: + secretKeyRef: + name: {{ include "artifactory.masterKeySecretName" . 
}} + key: master-key + {{- end }} + volumeMounts: + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else if .Values.artifactory.systemYaml }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + mountPath: "/tmp/etc/access.config.patch.yml" + subPath: access.config.patch.yml + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + mountPath: "/tmp/etc/tls.crt" + subPath: tls.crt + - name: access-certs + mountPath: "/tmp/etc/tls.key" + subPath: tls.key + {{- end }} + {{- if and .Values.artifactory.customPersistentPodVolumeClaim (not .Values.artifactory.customPersistentPodVolumeClaim.skipPrepareContainer) }} + - name: "prepare-custom-persistent-volume" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + echo "Setting ownership {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} on PVC {{ .Values.artifactory.customPersistentPodVolumeClaim.name }}" + chown -Rv {{ .Values.artifactory.uid }}:{{ .Values.artifactory.gid }} {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + securityContext: + runAsUser: 0 + volumeMounts: + - name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + mountPath: {{ .Values.artifactory.customPersistentPodVolumeClaim.mountPath }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ .Values.initContainerImage }}" + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'sh' + - '-c' + - > + until nc -z -w 2 {{ .Release.Name }}-postgresql {{ .Values.postgresql.service.port }} && echo database ok; do + sleep 2; + done; + {{- end }} + {{- end }} + {{- if or .Values.artifactory.customInitContainers .Values.global.customInitContainers }} +{{ tpl (include "artifactory.customInitContainers" .) . | indent 6 }} + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: 'migration-artifactory' + image: {{ include "artifactory.getImageInfoByValue" (list . "artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + {{- if .Values.artifactory.migration.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.migration.preStartCommand . }}; + {{- end }} + scriptsPath="/opt/jfrog/artifactory/app/bin"; + mkdir -p $scriptsPath; + echo "Copy migration scripts and Run migration"; + cp -fv /tmp/migrate.sh $scriptsPath/migrate.sh; + cp -fv /tmp/migrationHelmInfo.yaml $scriptsPath/migrationHelmInfo.yaml; + cp -fv /tmp/migrationStatus.sh $scriptsPath/migrationStatus.sh; + mkdir -p {{ .Values.artifactory.persistence.mountPath }}/log; + bash $scriptsPath/migrationStatus.sh {{ include "artifactory.app.version" . 
}} {{ .Values.artifactory.migration.timeoutSeconds }} > >(tee {{ .Values.artifactory.persistence.mountPath }}/log/helm-migration.log) 2>&1; + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) $ | indent 8 }} +{{- end }} + volumeMounts: + - name: migration-scripts + mountPath: "/tmp/migrate.sh" + subPath: migrate.sh + - name: migration-scripts + mountPath: "/tmp/migrationHelmInfo.yaml" + subPath: migrationHelmInfo.yaml + - name: migration-scripts + mountPath: "/tmp/migrationStatus.sh" + subPath: migrationStatus.sh + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory.customVolumeMounts" .) . | indent 8 }} + {{- end }} +{{- end }} + containers: + - name: {{ .Values.artifactory.name }} + image: {{ include "artifactory.getImageInfoByValue" (list . 
"artifactory") }} + imagePullPolicy: {{ .Values.artifactory.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + command: + - '/bin/bash' + - '-c' + - > + set -e; + if [ -d /artifactory_extra_conf ] && [ -d /artifactory_bootstrap ]; then + echo "Copying bootstrap config from /artifactory_extra_conf to /artifactory_bootstrap"; + cp -Lrfv /artifactory_extra_conf/ /artifactory_bootstrap/; + fi; + {{- if .Values.artifactory.configMapName }} + echo "Copying bootstrap configs"; + cp -Lrf /bootstrap/* /artifactory_bootstrap/; + {{- end }} + {{- if .Values.artifactory.userPluginSecrets }} + echo "Copying plugins"; + cp -Lrf /tmp/plugin/*/* /artifactory_bootstrap/plugins; + {{- end }} + {{- range .Values.artifactory.copyOnEveryStartup }} + {{- $targetPath := printf "%s/%s" $.Values.artifactory.persistence.mountPath .target }} + {{- $baseDirectory := regexFind ".*/" $targetPath }} + mkdir -p {{ $baseDirectory }}; + cp -Lrf {{ .source }} {{ $.Values.artifactory.persistence.mountPath }}/{{ .target }}; + {{- end }} + {{- if .Values.artifactory.preStartCommand }} + echo "Running custom preStartCommand command"; + {{ tpl .Values.artifactory.preStartCommand . }}; + {{- end }} + exec /entrypoint-artifactory.sh + {{- with .Values.artifactory.postStartCommand }} + lifecycle: + postStart: + exec: + command: + - '/bin/bash' + - '-c' + - > + echo "Running custom postStartCommand command"; + {{ tpl . $ }} + {{- end }} + env: + {{- if and (not .Values.waitForDatabase) (not .Values.postgresql.enabled) }} + - name: SKIP_WAIT_FOR_EXTERNAL_DB + value: "true" + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-user + {{- end }} + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: JF_SHARED_DATABASE_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: postgresql-password + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + name: {{ template "artifactory.fullname" . }}-database-creds + key: db-url + {{- end }} + {{- end }} +{{- with .Values.artifactory.extraEnvironmentVariables }} +{{ tpl (toYaml .) 
$ | indent 8 }} +{{- end }} + ports: + - containerPort: {{ .Values.artifactory.internalPort }} + name: http + - containerPort: {{ .Values.artifactory.internalArtifactoryPort }} + name: http-internal + {{- if .Values.artifactory.javaOpts.jmx.enabled }} + - containerPort: {{ .Values.artifactory.javaOpts.jmx.port }} + name: tcp-jmx + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.artifactory.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + mountPath: "/artifactory_bootstrap/plugins/" + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + mountPath: "/tmp/plugin/{{ tpl . $ }}" + {{- end }} + {{- end }} + - name: artifactory-volume + mountPath: {{ .Values.artifactory.persistence.mountPath | quote }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + mountPath: "/bootstrap/" + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + mountPath: "{{ .Values.artifactory.persistence.nfs.dataDir }}" + - name: artifactory-backup + mountPath: "{{ .Values.artifactory.persistence.nfs.backupDir }}" + {{- else }} + - name: binarystore-xml + mountPath: "/artifactory_bootstrap/binarystore.xml" + subPath: binarystore.xml + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + mountPath: "/artifactory_bootstrap/artifactory.lic" + {{- if .Values.artifactory.license.secret }} + subPath: {{ .Values.artifactory.license.dataKey }} + {{- else if .Values.artifactory.license.licenseKey }} + subPath: artifactory.lic + {{- end }} + {{- end }} + - name: installer-info + mountPath: "/artifactory_bootstrap/info/installer-info.json" + subPath: installer-info.json + {{- if or .Values.artifactory.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "artifactory.customVolumeMounts" .) . | indent 8 }} + {{- end }} + resources: +{{ toYaml .Values.artifactory.resources | indent 10 }} + {{- if .Values.artifactory.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.artifactory.readinessProbe.path }} + scheme: {{ include "artifactory.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.artifactory.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.artifactory.livenessProbe.path }} + scheme: {{ include "artifactory.scheme" . | upper }} + port: {{ .Values.artifactory.internalPort }} + initialDelaySeconds: {{ .Values.artifactory.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.artifactory.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.artifactory.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.artifactory.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.artifactory.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := .Values.artifactory.persistence.mountPath }} + {{- range .Values.artifactory.loggers }} + - name: {{ . | replace "_" "-" | replace "." 
"-" }} + image: {{ include "artifactory.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: artifactory-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.loggersResources | indent 10 }} + {{- end }} + {{ if .Values.artifactory.catalinaLoggers }} + {{- range .Values.artifactory.catalinaLoggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory.getImageInfoByValue" (list $ "logger") }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log/tomcat {{ . }}' + volumeMounts: + - name: artifactory-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.artifactory.catalinaLoggersResources | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + securityContext: + runAsUser: 0 + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: artifactory-volume + mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} + {{- if or .Values.artifactory.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "artifactory.customSidecarContainers" .) . | indent 6 }} + {{- end }} + {{- with .Values.artifactory.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.artifactory.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.artifactory.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: binarystore-xml + secret: + {{- if .Values.artifactory.persistence.customBinarystoreXmlSecret }} + secretName: {{ .Values.artifactory.persistence.customBinarystoreXmlSecret }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-binarystore + {{- end }} +{{- if .Values.artifactory.migration.enabled }} + - name: migration-scripts + configMap: + name: {{ template "artifactory.fullname" . }}-migration-scripts +{{- end }} + - name: installer-info + configMap: + name: {{ template "artifactory.fullname" . }}-installer-info + {{- if .Values.artifactory.userPluginSecrets }} + - name: bootstrap-plugins + emptyDir: {} + {{- range .Values.artifactory.userPluginSecrets }} + - name: {{ tpl . $ }} + secret: + secretName: {{ tpl . 
$ }} + {{- end }} + {{- end }} + {{- if and .Values.artifactory.persistence.enabled .Values.artifactory.persistence.existingClaim }} + - name: artifactory-volume + persistentVolumeClaim: + claimName: {{ .Values.artifactory.persistence.existingClaim }} + {{- end }} + {{- if .Values.artifactory.configMapName }} + - name: bootstrap-config + configMap: + name: {{ .Values.artifactory.configMapName }} + {{- end}} + {{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} + - name: tail-logger-script + configMap: + name: {{ template "artifactory.fullname" . }}-logger + {{- end }} + {{- if .Values.artifactory.configMaps }} + - name: artifactory-configmaps + configMap: + name: {{ template "artifactory.fullname" . }}-configmaps + {{- end }} + {{- if or .Values.artifactory.license.secret .Values.artifactory.license.licenseKey }} + - name: artifactory-license + secret: + {{- if .Values.artifactory.license.secret }} + secretName: {{ .Values.artifactory.license.secret }} + {{- else if .Values.artifactory.license.licenseKey }} + secretName: {{ template "artifactory.fullname" . }}-license + {{- end }} + {{- end }} + {{- if or (and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey) .Values.artifactory.admin.password }} + - name: access-bootstrap-creds + secret: + {{- if and .Values.artifactory.admin.secret .Values.artifactory.admin.dataKey }} + secretName: {{ .Values.artifactory.admin.secret }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-bootstrap-creds + {{- end }} + {{- end }} + {{- if eq .Values.artifactory.persistence.type "nfs" }} + - name: artifactory-data + persistentVolumeClaim: + claimName: {{ template "artifactory.fullname" . }}-data-pvc + - name: artifactory-backup + persistentVolumeClaim: + claimName: {{ template "artifactory.fullname" . }}-backup-pvc + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: artifactory-volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} + {{- if or .Values.systemYamlOverride.existingSecret .Values.artifactory.systemYaml }} + - name: systemyaml + secret: + secretName: {{ default (printf "%s-%s" (include "artifactory.fullname" .) "systemyaml") .Values.systemYamlOverride.existingSecret }} + {{- end }} + {{- if .Values.access.accessConfig }} + - name: access-config + secret: + secretName: {{ template "artifactory.fullname" . }}-access-config + {{- end }} + {{- if .Values.access.customCertificatesSecretName }} + - name: access-certs + secret: + secretName: {{ .Values.access.customCertificatesSecretName }} + {{- end }} + {{- if .Values.artifactory.customPersistentVolumeClaim }} + - name: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + persistentVolumeClaim: + claimName: {{ .Values.artifactory.customPersistentVolumeClaim.name }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "artifactory.name" . }}-filebeat-config + {{- end }} + {{- if or .Values.artifactory.customVolumes .Values.global.customVolumes }} +{{ tpl (include "artifactory.customVolumes" .) . 
| indent 6 }} + {{- end }} + {{- if not .Values.artifactory.persistence.enabled }} + - name: volume + emptyDir: + sizeLimit: {{ .Values.artifactory.persistence.size }} + {{- end }} +{{- with .Values.artifactory.persistence }} + {{- if and .enabled (not .existingClaim) }} + volumeClaimTemplates: + - metadata: + name: artifactory-volume + spec: + {{- if .storageClassName }} + {{- if (eq "-" .storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .storageClassName }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .accessMode }}" ] + resources: + requests: + storage: {{ .size }} + {{- end }} +{{- end }} + {{- if .Values.artifactory.customPersistentPodVolumeClaim }} + - metadata: + name: {{ .Values.artifactory.customPersistentPodVolumeClaim.name }} + spec: + {{- if .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }} + {{- if (eq "-" .Values.artifactory.customPersistentPodVolumeClaim.storageClassName) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.artifactory.customPersistentPodVolumeClaim.storageClassName }}" + {{- end }} + {{- end }} + accessModes: + {{- range .Values.artifactory.customPersistentPodVolumeClaim.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.artifactory.customPersistentPodVolumeClaim.size }} + {{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-system-yaml.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-system-yaml.yaml new file mode 100644 index 000000000..f36879218 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/artifactory-system-yaml.yaml @@ -0,0 +1,15 @@ +{{- if not .Values.systemYamlOverride.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "artifactory.fullname" . }}-systemyaml + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ tpl .Values.artifactory.systemYaml . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/filebeat-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..8ceb67c01 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.name" . }}-filebeat-config + labels: + app: {{ template "artifactory.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/ingress.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/ingress.yaml new file mode 100644 index 000000000..6f6bc11d1 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/ingress.yaml @@ -0,0 +1,107 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "artifactory.fullname" . 
-}} +{{- $servicePort := .Values.artifactory.externalPort -}} +{{- $artifactoryServicePort := .Values.artifactory.externalArtifactoryPort -}} +{{- $ingressName := default ( include "artifactory.fullname" . ) .Values.ingress.name -}} +{{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $ingressName }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ .Values.ingress.labels | toYaml | trimSuffix "\n"| indent 4 -}} +{{- end}} +{{- if .Values.ingress.annotations }} + annotations: +{{ .Values.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} +{{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: {{ $.Values.ingress.routerPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: {{ $.Values.ingress.artifactoryPath }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $artifactoryServicePort }} + {{- end -}} +{{- end -}} + {{- with .Values.ingress.additionalRules }} +{{ tpl . $ | indent 2 }} + {{- end }} + + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- if .Values.artifactory.replicator.enabled }} +--- +{{- $replicatorIngressName := default ( include "artifactory.replicator.fullname" . ) .Values.artifactory.replicator.ingress.name -}} + {{- if semverCompare ">=v1.14.0-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 + {{- else }} +apiVersion: extensions/v1beta1 + {{- end }} +kind: Ingress +metadata: + name: {{ $replicatorIngressName }} + labels: + app: "{{ template "artifactory.name" $ }}" + chart: "{{ template "artifactory.chart" $ }}" + release: {{ $.Release.Name | quote }} + heritage: {{ $.Release.Service | quote }} + {{- if .Values.artifactory.replicator.ingress.annotations }} + annotations: +{{ .Values.artifactory.replicator.ingress.annotations | toYaml | trimSuffix "\n" | indent 4 -}} + {{- end }} +spec: + {{- if .Values.ingress.defaultBackend.enabled }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + rules: +{{- if .Values.artifactory.replicator.ingress.hosts }} + {{- range $host := .Values.artifactory.replicator.ingress.hosts }} + - host: {{ $host | quote }} + http: + paths: + - path: /replicator/ + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + - path: /artifactory/api/replication/replicate/file/streaming + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- end -}} + {{- if .Values.artifactory.replicator.ingress.tls }} + tls: +{{ toYaml .Values.artifactory.replicator.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- if .Values.customIngress }} +--- +{{ .Values.customIngress | toYaml | trimSuffix "\n" }} +{{- end -}} +{{- end -}} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/logger-configmap.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/logger-configmap.yaml new file mode 100644 
index 000000000..f53f35e48 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if or .Values.artifactory.loggers .Values.artifactory.catalinaLoggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-logger + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! -d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}*.log 2>/dev/null | head -1) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 1 + done + +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-artifactory-conf.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-artifactory-conf.yaml new file mode 100644 index 000000000..bd2ebea96 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-artifactory-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customArtifactoryConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-artifactory-conf + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + artifactory.conf: | +{{ tpl .Values.nginx.artifactoryConf . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-certificate-secret.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-certificate-secret.yaml new file mode 100644 index 000000000..8d6862903 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-certificate-secret.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.tlsSecretName) .Values.nginx.https.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-certificate + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ ( include "artifactory.gen-certs" . 
) | indent 2 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-conf.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-conf.yaml new file mode 100644 index 000000000..851eae247 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-conf.yaml @@ -0,0 +1,14 @@ +{{- if and (not .Values.nginx.customConfigMap) .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "artifactory.fullname" . }}-nginx-conf + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + nginx.conf: | +{{ tpl .Values.nginx.mainConf . | indent 4 }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-deployment.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-deployment.yaml new file mode 100644 index 000000000..bc53f535b --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-deployment.yaml @@ -0,0 +1,203 @@ +{{- if .Values.nginx.enabled -}} +{{- $serviceName := include "artifactory.fullname" . -}} +{{- $servicePort := .Values.artifactory.externalPort -}} +apiVersion: apps/v1 +kind: {{ .Values.nginx.kind }} +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} +{{- if .Values.nginx.labels }} +{{ toYaml .Values.nginx.labels | indent 4 }} +{{- end }} +spec: +{{- if eq .Values.nginx.kind "StatefulSet" }} + serviceName: {{ template "artifactory.nginx.fullname" . }} +{{- end }} +{{- if ne .Values.nginx.kind "DaemonSet" }} + replicas: {{ .Values.nginx.replicaCount }} +{{- end }} + selector: + matchLabels: + app: {{ template "artifactory.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.nginx.name }} + template: + metadata: + annotations: + checksum/nginx-conf: {{ include (print $.Template.BasePath "/nginx-conf.yaml") . | sha256sum }} + checksum/nginx-artifactory-conf: {{ include (print $.Template.BasePath "/nginx-artifactory-conf.yaml") . | sha256sum }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: {{ template "artifactory.serviceAccountName" . }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "artifactory.imagePullSecrets" . 
| indent 6 }} + {{- end }} + {{- if .Values.nginx.priorityClassName }} + priorityClassName: {{ .Values.nginx.priorityClassName | quote }} + {{- end }} + initContainers: + - name: "setup" + image: "{{ .Values.initContainerImage }}" + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - '/bin/sh' + - '-c' + - > + rm -rfv {{ .Values.nginx.persistence.mountPath }}/lost+found; + mkdir -p {{ .Values.nginx.persistence.mountPath }}/logs; + volumeMounts: + - mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + name: nginx-volume + securityContext: + runAsUser: {{ .Values.nginx.uid }} + fsGroup: {{ .Values.nginx.gid }} + containers: + - name: {{ .Values.nginx.name }} + image: {{ include "artifactory.getImageInfoByValue" (list . "nginx") }} + imagePullPolicy: {{ .Values.nginx.image.pullPolicy }} + command: + - 'nginx' + - '-g' + - 'daemon off;' + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.1 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - containerPort: {{ .Values.nginx.http.internalPort }} + name: http + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttp }} + name: http-internal + {{- end }} + {{- if .Values.nginx.https }} + {{- if .Values.nginx.https.enabled }} + - containerPort: {{ .Values.nginx.https.internalPort }} + name: https + {{- end }} + {{- else }} # DEPRECATED + - containerPort: {{ .Values.nginx.internalPortHttps }} + name: https-internal + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - containerPort: {{ .Values.nginx.ssh.internalPort }} + name: tcp-ssh + {{- end }} + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: nginx-artifactory-conf + mountPath: "{{ .Values.nginx.persistence.mountPath }}/conf.d/" + - name: nginx-volume + mountPath: {{ .Values.nginx.persistence.mountPath | quote }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + mountPath: "{{ .Values.nginx.persistence.mountPath }}/ssl" + {{- end }} + resources: +{{ toYaml .Values.nginx.resources | indent 10 }} + {{- if .Values.nginx.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.nginx.readinessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.nginx.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.nginx.livenessProbe.path }} + {{- if .Values.nginx.http.enabled }} + port: {{ .Values.nginx.http.internalPort }} + scheme: HTTP + {{- else }} + port: {{ .Values.nginx.https.internalPort }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: {{ .Values.nginx.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nginx.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.nginx.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.nginx.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.nginx.livenessProbe.successThreshold }} + {{- end }} + {{- $mountPath := 
.Values.nginx.persistence.mountPath }} + {{- range .Values.nginx.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "artifactory.getImageInfoByValue" (list $ "logger") }} + command: + - tail + args: + - '-F' + - '{{ $mountPath }}/logs/{{ . }}' + volumeMounts: + - name: nginx-volume + mountPath: {{ $mountPath }} + resources: +{{ toYaml $.Values.nginx.loggersResources | indent 10 }} + {{- end }} + {{- with .Values.nginx.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.nginx.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + {{- if .Values.nginx.customConfigMap }} + name: {{ .Values.nginx.customConfigMap }} + {{- else }} + name: {{ template "artifactory.fullname" . }}-nginx-conf + {{- end }} + - name: nginx-artifactory-conf + configMap: + {{- if .Values.nginx.customArtifactoryConfigMap }} + name: {{ .Values.nginx.customArtifactoryConfigMap }} + {{- else }} + name: {{ template "artifactory.fullname" . }}-nginx-artifactory-conf + {{- end }} + - name: nginx-volume + {{- if .Values.nginx.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.nginx.persistence.existingClaim | default (include "artifactory.nginx.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.nginx.https.enabled }} + - name: ssl-certificates + secret: + {{- if .Values.nginx.tlsSecretName }} + secretName: {{ .Values.nginx.tlsSecretName }} + {{- else }} + secretName: {{ template "artifactory.fullname" . }}-nginx-certificate + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-pvc.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-pvc.yaml new file mode 100644 index 000000000..3394a0ade --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-pvc.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.nginx.persistence.enabled (.Values.nginx.enabled ) }} +{{- if (not .Values.nginx.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + accessModes: + - {{ .Values.nginx.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.nginx.persistence.size | quote }} +{{- if .Values.nginx.persistence.storageClassName }} +{{- if (eq "-" .Values.nginx.persistence.storageClassName) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.nginx.persistence.storageClassName }}" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-service.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-service.yaml new file mode 100644 index 000000000..7f4c8fb66 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/templates/nginx-service.yaml @@ -0,0 +1,73 @@ +{{- if .Values.nginx.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "artifactory.nginx.fullname" . }} + labels: + app: {{ template "artifactory.name" . }} + chart: {{ template "artifactory.chart" . 
}} + component: {{ .Values.nginx.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.nginx.service.annotations }} + annotations: +{{ toYaml .Values.nginx.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.nginx.service.type }} +{{- if eq .Values.nginx.service.type "LoadBalancer" }} + {{ if .Values.nginx.service.loadBalancerIP -}} + loadBalancerIP: {{ .Values.nginx.service.loadBalancerIP }} + {{ end -}} + {{- if .Values.nginx.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.nginx.service.externalTrafficPolicy }} + {{- end }} +{{- end }} +{{- if .Values.nginx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.nginx.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + # DEPRECATION NOTE: The following is to maintain support for values pre 1.3.0 and + # will be cleaned up in a later version + {{- if .Values.nginx.http }} + {{- if .Values.nginx.http.enabled }} + - port: {{ .Values.nginx.http.externalPort }} + targetPort: {{ .Values.nginx.http.internalPort }} + protocol: TCP + name: http + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttp }} + targetPort: {{ .Values.nginx.internalPortHttp }} + protocol: TCP + name: http + {{- end }} + {{- if .Values.nginx.https }} + {{- if or .Values.nginx.https.enabled .Values.nginx.service.ssloffload }} + - port: {{ .Values.nginx.https.externalPort }} + {{- if .Values.nginx.service.ssloffload }} + targetPort: {{ .Values.nginx.http.internalPort }} + {{- else }} + targetPort: {{ .Values.nginx.https.internalPort}} + {{- end }} + protocol: TCP + name: https + {{- end }} + {{- else }} # DEPRECATED + - port: {{ .Values.nginx.externalPortHttps }} + targetPort: {{ .Values.nginx.internalPortHttps }} + protocol: TCP + name: https + {{- end }} + {{- if .Values.artifactory.ssh.enabled }} + - port: {{ .Values.nginx.ssh.externalPort }} + targetPort: {{ .Values.nginx.ssh.internalPort }} + protocol: TCP + name: tcp-ssh + {{- end }} + selector: + app: {{ template "artifactory.name" . 
}} + component: {{ .Values.nginx.name }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-large.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-large.yaml new file mode 100644 index 000000000..6399c5b8a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-large.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "6Gi" + cpu: "4" + limits: + memory: "10Gi" + cpu: "8" + javaOpts: + xms: "6g" + xmx: "8g" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-medium.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-medium.yaml new file mode 100644 index 000000000..44aff82de --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-medium.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "8Gi" + cpu: "6" + javaOpts: + xms: "4g" + xmx: "6g" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-small.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-small.yaml new file mode 100644 index 000000000..04368ae15 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values-small.yaml @@ -0,0 +1,11 @@ +artifactory: + resources: + requests: + memory: "4Gi" + cpu: "2" + limits: + memory: "6Gi" + cpu: "4" + javaOpts: + xms: "4g" + xmx: "4g" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values.yaml new file mode 100644 index 000000000..dcbf72fdb --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/charts/artifactory/values.yaml @@ -0,0 +1,1376 @@ +# Default values for artifactory. +# This is a YAML-formatted file. + +# Beware when changing values here. You should know what you are doing! 
+# Access the values with {{ .Values.key.subkey }} + + +global: + # imageRegistry: docker.bintray.io + # imagePullSecrets: + # - myRegistryKeySecretName + ## Chart.AppVersion can be overridden using global.versions.artifactory or .Values.artifactory.image.tag + ## Note: Order of preference is 1) global.versions 2) .Values.artifactory.image.tag 3) Chart.AppVersion + ## This also applies to nginx images (.Values.nginx.image.tag) + versions: {} + # artifactory: + # joinKey: + # masterKey: + # joinKeySecretName: + # masterKeySecretName: + # customInitContainersBegin: | + + # customInitContainers: | + + # customVolumes: | + + # customVolumeMounts: | + + # customSidecarContainers: | + + +initContainerImage: docker.bintray.io/alpine:3.12.1 + +# Init containers +initContainers: + resources: {} +# requests: +# memory: "64Mi" +# cpu: "10m" +# limits: +# memory: "128Mi" +# cpu: "250m" + +installer: + type: + platform: + +installerInfo: '{"productId": "Helm_artifactory/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + +# For supporting pulling from private registries +# imagePullSecrets: +# - myRegistryKeySecretName + +## Artifactory systemYaml override +## This is for advanced use cases where users want to provide their own systemYaml for configuring Artifactory +## Refer: https://www.jfrog.com/confluence/display/JFROG/Artifactory+System+YAML +## Note: This will override existing (default) .Values.artifactory.systemYaml in values.yaml +## Alternatively, systemYaml can be overridden via customInitContainers using external sources like vaults, external repositories etc. Please refer to the customInitContainers section below for an example. +## Note: Order of preference is 1) customInitContainers 2) systemYamlOverride existingSecret 3) default systemYaml in values.yaml +systemYamlOverride: +## You can use a pre-existing secret by specifying existingSecret + existingSecret: +## The dataKey should be the name of the secret data key created. + dataKey: + +## Role Based Access Control +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: true + role: + ## Rules to create. It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + verbs: + - get + - watch + - list + +## Service Account +## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + ## Service Account annotations + annotations: {} + +ingress: + enabled: false + defaultBackend: + enabled: true + # Used to create an Ingress record. + hosts: [] + routerPath: / + artifactoryPath: /artifactory/ + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/configuration-snippet: | + # proxy_pass_header Server; + # proxy_set_header X-JFrog-Override-Base-Url https://; + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + labels: {} + # traffic-type: external + # traffic-type: internal + tls: [] + # Secrets must be manually created in the namespace. 
+ # - secretName: chart-example-tls + # hosts: + # - artifactory.domain.example + + # Additional ingress rules + additionalRules: [] + +## Allows to add custom ingress +customIngress: | + +networkpolicy: + # Allows all ingress and egress + - name: artifactory + podSelector: + matchLabels: + app: artifactory + egress: + - {} + ingress: + - {} + # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgresql + # podSelector: + # matchLabels: + # app: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: artifactory + +logger: + image: + registry: docker.bintray.io + repository: busybox + tag: 1.31.1 + +# Artifactory +artifactory: + name: artifactory + # Note that by default we use appVersion to get image tag/version + image: + registry: docker.bintray.io + repository: jfrog/artifactory-pro + # tag: + pullPolicy: IfNotPresent + labels: {} + + # Create a priority class for the Artifactory pod or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties + deleteDBPropertiesOnStartup: true + + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 200 + extraConfig: 'acceptCount="100"' + + # Support for open metrics is only available for Artifactory 7.7.x (appVersions) and above. + # To enable set `.Values.artifactory.openMetrics.enabled` to `true` + # Refer - https://www.jfrog.com/confluence/display/JFROG/Open+Metrics + openMetrics: + enabled: false + + # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup + copyOnEveryStartup: + # # Absolute path + # - source: /artifactory_bootstrap/binarystore.xml + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + # # Absolute path + # - source: /artifactory_bootstrap/artifactory.lic + # # Relative to ARTIFACTORY_HOME/ + # target: etc/artifactory/ + + # Sidecar containers for tailing Artifactory logs + loggers: [] + # - access-audit.log + # - access-request.log + # - access-security-audit.log + # - access-service.log + # - artifactory-access.log + # - artifactory-event.log + # - artifactory-import-export.log + # - artifactory-request.log + # - artifactory-service.log + # - frontend-request.log + # - frontend-service.log + # - metadata-request.log + # - metadata-service.log + # - router-request.log + # - router-service.log + # - router-traefik.log + # - derby.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Sidecar containers for tailing Tomcat (catalina) logs + catalinaLoggers: [] + # - tomcat-catalina.log + # - tomcat-localhost.log + + # Tomcat (catalina) loggers resources + catalinaLoggersResources: {} + # requests: + # memory: "10Mi" + # cpu: "10m" + # limits: + # memory: "100Mi" + # cpu: "50m" + + # Migration support from 6.x to 7.x + migration: + enabled: true + timeoutSeconds: 3600 + ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar 
https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + # - name: "custom-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: artifactory-volume + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-systemyaml-setup" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # command: + # - 'sh' + # - '-c' + # - 'wget -O {{ .Values.artifactory.persistence.mountPath }}/etc/system.yaml https:///systemyaml' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: artifactory-volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: "{{ .Values.initContainerImage }}" + # imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}" + # securityContext: + # runAsUser: 0 + # fsGroup: 0 + # command: + # - 'sh' + # - '-c' + # - 'sh /scripts/script.sh' + # volumeMounts: + # - mountPath: "{{ .Values.artifactory.persistence.mountPath }}" + # name: artifactory-volume + # - mountPath: "/scripts/script.sh" + # name: custom-script + # subPath: script.sh + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + ## Add custom volumes + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: "/scripts/script.sh" + # subPath: script.sh + # - name: posthook-start + # mountPath: "/scripts/posthoook-start.sh" + # subPath: posthoook-start.sh + # - name: prehook-start + # mountPath: "/scripts/prehook-start.sh" + # subPath: prehook-start.sh + + # Add custom persistent volume mounts - Available for the pod + # If skipPrepareContainer is set to true , this will skip the prepare-custom-persistent-volume init container + customPersistentPodVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + # skipPrepareContainer: false + + # Add custom persistent volume mounts - Available to the entire namespace + customPersistentVolumeClaim: {} + # name: + # mountPath: + # accessModes: + # - "-" + # size: + # storageClassName: + + ## Artifactory license. + license: + ## licenseKey is the license key in plain text. Use either this or the license.secret setting + licenseKey: + ## If artifactory.license.secret is passed, it will be mounted as + ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time. + secret: + ## The dataKey should be the name of the secret data key created. 
+ dataKey: + + ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter + configMapName: + + # Add any list of configmaps to Artifactory + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## List of secrets for Artifactory user plugins. + ## One Secret per plugin's files. + userPluginSecrets: + # - archive-old-artifacts + # - build-cleanup + # - webhook + # - '{{ template "my-chart.fullname" . }}' + + ## Artifactory requires a unique master key. + ## You can generate one with the command: "openssl rand -hex 32" + ## An initial one is auto generated by Artifactory on first startup. + # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + # masterKeySecretName: + + ## Join Key to connect other services to Artifactory + ## IMPORTANT: Setting this value overrides the existing joinKey + ## IMPORTANT: You should NOT use the example joinKey for a production deployment! + # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + + # Add custom secrets - secret per file + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + + binarystore: + enabled: true + + ## admin allows to set the password for the default admin user. + ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate + admin: + ip: "127.0.0.1" + username: "admin" + password: + secret: + dataKey: + + ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle + # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar" + ## Extra post-start command to run extra commands after container starts + # postStartCommand: + + ## Extra environment variables that can be used to tune Artifactory to your needs. 
+ ## Uncomment and set value as needed + extraEnvironmentVariables: + # - name: SERVER_XML_ARTIFACTORY_PORT + # value: "8081" + # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS + # value: "200" + # - name: SERVER_XML_ACCESS_MAX_THREADS + # value: "50" + # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_ACCESS_EXTRA_CONFIG + # value: "" + # - name: SERVER_XML_EXTRA_CONNECTOR + # value: "" + # - name: DB_POOL_MAX_ACTIVE + # value: "100" + # - name: DB_POOL_MAX_IDLE + # value: "10" + # - name: MY_SECRET_ENV_VAR + # valueFrom: + # secretKeyRef: + # name: my-secret-name + # key: my-secret-key + + systemYaml: | + shared: + logging: + consoleLog: + enabled: {{ .Values.artifactory.consoleLog }} + extraJavaOpts: > + -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }} + {{- with .Values.artifactory.javaOpts }} + -Dartifactory.async.corePoolSize={{ .corePoolSize }} + {{- if .xms }} + -Xms{{ .xms }} + {{- end }} + {{- if .xmx }} + -Xmx{{ .xmx }} + {{- end }} + {{- if .jmx.enabled }} + -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }} + -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }} + {{- if .jmx.host }} + -Djava.rmi.server.hostname={{ tpl .jmx.host $ }} + {{- else }} + -Djava.rmi.server.hostname={{ template "artifactory.fullname" $ }} + {{- end }} + {{- if .jmx.authenticate }} + -Dcom.sun.management.jmxremote.authenticate=true + -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }} + -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }} + {{- else }} + -Dcom.sun.management.jmxremote.authenticate=false + {{- end }} + {{- end }} + {{- if .other }} + {{ .other }} + {{- end }} + {{- end }} + {{- if or .Values.database.type .Values.postgresql.enabled }} + database: + {{- if .Values.postgresql.enabled }} + type: postgresql + url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}" + driver: org.postgresql.Driver + username: "{{ .Values.postgresql.postgresqlUsername }}" + {{- else }} + type: "{{ .Values.database.type }}" + driver: "{{ .Values.database.driver }}" + {{- end }} + {{- end }} + artifactory: + {{- if .Values.artifactory.openMetrics }} + metrics: + enabled: {{ .Values.artifactory.openMetrics.enabled }} + {{- end }} + database: + maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }} + frontend: + session: + timeMinutes: {{ .Values.frontend.session.timeoutMinutes | quote }} + access: + database: + maxOpenConnections: {{ .Values.access.database.maxOpenConnections }} + tomcat: + connector: + maxThreads: {{ .Values.access.tomcat.connector.maxThreads }} + extraConfig: {{ .Values.access.tomcat.connector.extraConfig }} + metadata: + database: + maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }} + {{- if .Values.artifactory.replicator.enabled }} + replicator: + enabled: true + {{- end }} + + annotations: {} + + service: + name: artifactory + type: ClusterIP + ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer) + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... 
--set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + + ## The following setting are to configure a dedicated Ingress object for Replicator service + replicator: + enabled: false + ingress: + name: + hosts: [] + annotations: {} + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/configuration-snippet: | + # chunked_transfer_encoding on; + tls: [] + # Secrets must be manually created in the namespace. + # - hosts: + # - artifactory.domain.example + # secretName: chart-example-tls-secret + + ## IMPORTANT: If overriding artifactory.internalPort: + ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024! + externalPort: 8082 + internalPort: 8082 + externalArtifactoryPort: 8081 + internalArtifactoryPort: 8081 + uid: 1030 + gid: 1030 + terminationGracePeriodSeconds: 30 + + ## By default, the Artifactory StatefulSet is created with a securityContext that sets the `runAsUser` and the `fsGroup` to the `artifactory.uid` value. + ## If you want to disable the securityContext for the Artifactory StatefulSet, set this tag to false + setSecurityContext: true + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 90 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + persistence: + mountPath: "/var/opt/jfrog/artifactory" + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + ## Storage default size. Should be increased for production deployments. + size: 20Gi + + ## Use a custom Secret to be mounted as your binarystore.xml + ## NOTE: This will ignore all settings below that make up binarystore.xml + customBinarystoreXmlSecret: + ## Cache default size. Should be increased for production deployments. + maxCacheSize: 5000000000 + cacheProviderDir: cache + + ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config + ## Supported types are: + ## file-system (default) + ## nfs + ## google-storage + ## aws-s3 + ## aws-s3-v3 + ## azure-blob + type: file-system + + ## Use binarystoreXml to provide a custom binarystore.xml + ## This can be a template or hardcoded. 
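+ ## Illustrative sketch (an assumption, not the chart default that follows): a minimal hardcoded override could be + # binarystoreXml: | + # <config version="2"> + # <chain template="file-system"/> + # </config>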
+ binarystoreXml: | + {{- if eq .Values.artifactory.persistence.type "file-system" -}} + + + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{- end }} + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{- end }} + + + {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }} + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + {{- end }} + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "google-storage" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.mountPath }}/data/filestore + /tmp + + + + google-cloud-storage + {{ .Values.artifactory.persistence.googleStorage.endpoint }} + {{ .Values.artifactory.persistence.googleStorage.httpsOnly }} + {{ .Values.artifactory.persistence.googleStorage.bucketName }} + {{ .Values.artifactory.persistence.googleStorage.identity }} + {{ .Values.artifactory.persistence.googleStorage.credential }} + {{ .Values.artifactory.persistence.googleStorage.path }} + {{ .Values.artifactory.persistence.googleStorage.bucketExists }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + {{- with .Values.artifactory.persistence.awsS3V3 }} + + {{ .testConnection }} + {{- if .identity }} + {{ .identity }} + {{- end }} + {{- if .credential }} + {{ .credential }} + {{- end }} + {{ .region }} + {{ .bucketName }} + {{ .path }} + {{ .endpoint }} + {{- with .maxConnections }} + {{ . }} + {{- end }} + {{- with .kmsServerSideEncryptionKeyId }} + {{ . }} + {{- end }} + {{- with .kmsKeyRegion }} + {{ . }} + {{- end }} + {{- with .kmsCryptoMode }} + {{ . }} + {{- end }} + {{- if .useInstanceCredentials }} + true + {{- else }} + false + {{- end }} + {{ .usePresigning }} + {{ .signatureExpirySeconds }} + {{- with .cloudFrontDomainName }} + {{ . }} + {{- end }} + {{- with .cloudFrontKeyPairId }} + {{ .cloudFrontKeyPairId }} + {{- end }} + {{- with .cloudFrontPrivateKey }} + {{ . }} + {{- end }} + {{- with .enableSignedUrlRedirect }} + {{ . }} + {{- end }} + {{- with .enablePathStyleAccess }} + {{ . 
}} + {{- end }} + + {{- end }} + + {{- end }} + + {{- if eq .Values.artifactory.persistence.type "aws-s3" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.awsS3.endpoint }} + {{- if .Values.artifactory.persistence.awsS3.roleName }} + {{ .Values.artifactory.persistence.awsS3.roleName }} + true + {{- else }} + {{ .Values.artifactory.persistence.awsS3.refreshCredentials }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.s3AwsVersion }} + {{ .Values.artifactory.persistence.awsS3.testConnection }} + {{ .Values.artifactory.persistence.awsS3.httpsOnly }} + {{ .Values.artifactory.persistence.awsS3.region }} + {{ .Values.artifactory.persistence.awsS3.bucketName }} + {{- if .Values.artifactory.persistence.awsS3.identity }} + {{ .Values.artifactory.persistence.awsS3.identity }} + {{- end }} + {{- if .Values.artifactory.persistence.awsS3.credential }} + {{ .Values.artifactory.persistence.awsS3.credential }} + {{- end }} + {{ .Values.artifactory.persistence.awsS3.path }} + {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }} + + {{- end }} + + + {{- end }} + {{- if eq .Values.artifactory.persistence.type "azure-blob" }} + + + + + + + + + + + + + + + {{ .Values.artifactory.persistence.maxCacheSize }} + {{ .Values.artifactory.persistence.cacheProviderDir }} + + + + {{ .Values.artifactory.persistence.azureBlob.accountName }} + {{ .Values.artifactory.persistence.azureBlob.accountKey }} + {{ .Values.artifactory.persistence.azureBlob.endpoint }} + {{ .Values.artifactory.persistence.azureBlob.containerName }} + {{ .Values.artifactory.persistence.azureBlob.testConnection }} + + + {{- end }} + + ## For artifactory.persistence.type nfs + ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes + ## cluster nodes. + ## Need to have the following set + nfs: + # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}' + ip: + haDataMount: "/data" + haBackupMount: "/backup" + dataDir: "/var/opt/jfrog/artifactory" + backupDir: "/var/opt/jfrog/artifactory-backup" + capacity: 200Gi + + ## For artifactory.persistence.type file-system + fileSystem: + cache: + enabled: false + + ## For artifactory.persistence.type google-storage + googleStorage: + endpoint: commondatastorage.googleapis.com + httpsOnly: false + # Set a unique bucket name + bucketName: "artifactory-gcp" + identity: + credential: + path: "artifactory/filestore" + bucketExists: false + + ## For artifactory.persistence.type aws-s3-v3 + awsS3V3: + testConnection: false + identity: + credential: + region: + bucketName: artifactory-aws + path: artifactory/filestore + endpoint: + maxConnections: 50 + kmsServerSideEncryptionKeyId: + kmsKeyRegion: + kmsCryptoMode: + useInstanceCredentials: true + usePresigning: false + signatureExpirySeconds: 300 + cloudFrontDomainName: + cloudFrontKeyPairId: + cloudFrontPrivateKey: + enableSignedUrlRedirect: false + enablePathStyleAccess: false + + ## For artifactory.persistence.type aws-s3 + ## IMPORTANT: Make sure S3 `endpoint` and `region` match! 
See https://docs.aws.amazon.com/general/latest/gr/rande.html + awsS3: + # Set a unique bucket name + bucketName: "artifactory-aws" + endpoint: + region: + roleName: + identity: + credential: + path: "artifactory/filestore" + refreshCredentials: true + httpsOnly: true + testConnection: false + s3AwsVersion: AWS4-HMAC-SHA256 + ## Additional properties to set on the s3 provider + properties: {} + # httpclient.max-connections: 100 + ## For artifactory.persistence.type azure-blob + azureBlob: + accountName: + accountKey: + endpoint: + containerName: + testConnection: false + ## artifactory data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + ## Annotations for the Persistent Volume Claim + annotations: {} + ## Uncomment the following resources definitions or pass them from command line + ## to control the cpu and memory resources allocated by the Kubernetes cluster + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "2Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory. + ## You should set them according to the resources set above + javaOpts: + # xms: "1g" + # xmx: "2g" + jmx: + enabled: false + port: 9010 + host: + ssl: false + # When authenticate is true, accessFile and passwordFile are required + authenticate: false + accessFile: + passwordFile: + corePoolSize: 8 + # other: "" + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + ssh: + enabled: false + internalPort: 1339 + externalPort: 1339 + +frontend: + ## Session settings + session: + ## Time in minutes after which the frontend token will need to be refreshed + timeoutMinutes: '30' + +access: + ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file. + ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates + ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes. + ## This ensures that the node to node communication is done over TLS. 
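+ ## Illustrative example (not the shipped default shown below): to turn on TLS for JFrog Access you would set + # accessConfig: + # security: + # tls: true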
+ accessConfig: + security: + tls: false + + ## You can use a pre-existing secret by specifying customCertificatesSecretName + ## Example : Create a tls secret using `kubectl create secret tls --cert=ca.crt --key=ca.private.key` + # customCertificatesSecretName: + + ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key + # resetAccessCAKeys: false + database: + maxOpenConnections: 80 + tomcat: + connector: + maxThreads: 50 + extraConfig: 'acceptCount="100"' + +metadata: + database: + maxOpenConnections: 80 + +# Nginx +nginx: + enabled: true + kind: Deployment + name: nginx + labels: {} + replicaCount: 1 + uid: 104 + gid: 107 + # Note that by default we use appVersion to get image tag/version + image: + registry: docker.bintray.io + repository: jfrog/nginx-artifactory-pro + # tag: + pullPolicy: IfNotPresent + + # Priority Class name to be used in deployment if provided + priorityClassName: + + # Sidecar containers for tailing Nginx logs + loggers: [] + # - access.log + # - error.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + + # Logs options + logs: + stderr: false + level: warn + + mainConf: | + # Main Nginx configuration file + worker_processes 4; + + {{ if .Values.nginx.logs.stderr }} + error_log stderr {{ .Values.nginx.logs.level }}; + {{- else -}} + error_log {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }}; + {{- end }} + pid /tmp/nginx.pid; + + {{- if .Values.artifactory.ssh.enabled }} + ## SSH Server Configuration + stream { + server { + listen {{ .Values.nginx.ssh.internalPort }}; + proxy_pass {{ include "artifactory.fullname" . }}:{{ .Values.artifactory.ssh.externalPort }}; + } + } + {{- end }} + + events { + worker_connections 1024; + } + + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + variables_hash_max_size 1024; + variables_hash_bucket_size 64; + server_names_hash_max_size 4096; + server_names_hash_bucket_size 128; + types_hash_max_size 2048; + types_hash_bucket_size 64; + proxy_read_timeout 2400s; + client_header_timeout 2400s; + client_body_timeout 2400s; + proxy_connect_timeout 75s; + proxy_send_timeout 2400s; + proxy_buffer_size 128k; + proxy_buffers 40 128k; + proxy_busy_buffers_size 128k; + proxy_temp_file_write_size 250m; + proxy_http_version 1.1; + client_body_buffer_size 128k; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + log_format timing 'ip = $remote_addr ' + 'user = \"$remote_user\" ' + 'local_time = \"$time_local\" ' + 'host = $host ' + 'request = \"$request\" ' + 'status = $status ' + 'bytes = $body_bytes_sent ' + 'upstream = \"$upstream_addr\" ' + 'upstream_time = $upstream_response_time ' + 'request_time = $request_time ' + 'referer = \"$http_referer\" ' + 'UA = \"$http_user_agent\"'; + + access_log {{ .Values.nginx.persistence.mountPath }}/logs/access.log timing; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; + + } + + + artifactoryConf: | + {{- if .Values.nginx.https.enabled }} + ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; + ssl_certificate {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt; + ssl_certificate_key {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key; + ssl_session_cache shared:SSL:1m; + ssl_prefer_server_ciphers 
on; + {{- end }} + ## server configuration + server { + {{- if .Values.nginx.internalPortHttps }} + listen {{ .Values.nginx.internalPortHttps }} ssl; + {{- else -}} + {{- if .Values.nginx.https.enabled }} + listen {{ .Values.nginx.https.internalPort }} ssl; + {{- end }} + {{- end }} + {{- if .Values.nginx.internalPortHttp }} + listen {{ .Values.nginx.internalPortHttp }}; + {{- else -}} + {{- if .Values.nginx.http.enabled }} + listen {{ .Values.nginx.http.internalPort }}; + {{- end }} + {{- end }} + server_name ~(?.+)\.{{ include "artifactory.fullname" . }} {{ include "artifactory.fullname" . }} + {{- range .Values.ingress.hosts -}} + {{- if contains "." . -}} + {{ "" | indent 0 }} ~(?.+)\.{{ . }} + {{- end -}} + {{- end -}}; + + if ($http_x_forwarded_proto = '') { + set $http_x_forwarded_proto $scheme; + } + ## Application specific logs + ## access_log /var/log/nginx/artifactory-access.log timing; + ## error_log /var/log/nginx/artifactory-error.log; + rewrite ^/artifactory/?$ / redirect; + if ( $repo != "" ) { + rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break; + } + chunked_transfer_encoding on; + client_max_body_size 0; + + location / { + proxy_read_timeout 900; + proxy_pass_header Server; + proxy_cookie_path ~*^/.* /; + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalPort }}/; + {{- if .Values.nginx.service.ssloffload}} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host; + {{- else }} + proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port; + proxy_set_header X-Forwarded-Port $server_port; + {{- end }} + proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto; + proxy_set_header Host $http_host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + location /artifactory/ { + if ( $request_uri ~ ^/artifactory/(.*)$ ) { + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1; + } + proxy_pass {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/; + } + } + } + + service: + ## For minikube, set this to NodePort, elsewhere use LoadBalancer + type: LoadBalancer + ssloffload: false + ## For supporting whitelist on the Nginx LoadBalancer service + ## Set this to a list of IP CIDR ranges + ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32'] + ## or pass from helm command line + ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}' + loadBalancerSourceRanges: [] + annotations: {} + ## Provide static ip address + loadBalancerIP: + ## There are two available options: “Cluster” (default) and “Local”. 
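+ ## Illustrative example (assumption, not the default below): switching to "Local" preserves the client source IP but only routes to nodes running an nginx pod + # externalTrafficPolicy: Local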
+ externalTrafficPolicy: Cluster + + http: + enabled: true + externalPort: 80 + internalPort: 80 + https: + enabled: true + externalPort: 443 + internalPort: 443 + + ssh: + internalPort: 1339 + externalPort: 1339 + + # DEPRECATED: The following will be removed in a future release + # externalPortHttp: 80 + # internalPortHttp: 80 + # externalPortHttps: 443 + # internalPortHttps: 443 + + ## The following settings are to configure the frequency of the liveness and readiness probes + livenessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 180 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + readinessProbe: + enabled: true + path: /router/api/v1/system/health + initialDelaySeconds: 120 + failureThreshold: 10 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 1 + + ## The SSL secret that will be used by the Nginx pod + # tlsSecretName: chart-example-tls + ## Custom ConfigMap for nginx.conf + customConfigMap: + ## Custom ConfigMap for artifactory-ha.conf + customArtifactoryConfigMap: + persistence: + mountPath: "/var/opt/jfrog/nginx" + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + size: 5Gi + ## nginx data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClassName: "-" + resources: {} + # requests: + # memory: "250Mi" + # cpu: "100m" + # limits: + # memory: "250Mi" + # cpu: "500m" + nodeSelector: {} + + tolerations: [] + + affinity: {} + +## Database configurations +## Use the wait-for-db init container. Set to false to skip +waitForDatabase: true + +## Configuration values for the PostgreSQL dependency sub-chart +## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md +postgresql: + enabled: true + image: + registry: docker.bintray.io + repository: bitnami/postgresql + tag: 12.5.0-debian-10-r25 + postgresqlUsername: artifactory + postgresqlPassword: "" + postgresqlDatabase: artifactory + postgresqlExtendedConf: + listenAddresses: "'*'" + maxConnections: "1500" + persistence: + enabled: true + size: 50Gi + service: + port: 5432 + master: + nodeSelector: {} + affinity: {} + tolerations: [] + slave: + nodeSelector: {} + affinity: {} + tolerations: [] + resources: {} + # requests: + # memory: "512Mi" + # cpu: "100m" + # limits: + # memory: "1Gi" + # cpu: "500m" + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +## specify custom database details here or leave empty and Artifactory will use embedded derby +database: + type: + driver: + ## If you set the url, leave host and port empty + url: + ## If you would like this chart to create the secret containing the db + ## password, use these values + user: + password: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "rds-artifactory" + # key: "db-user" + # password: + # name: "rds-artifactory" + # key: "db-password" + # url: + # name: "rds-artifactory" + # key: "db-url" + +# Filebeat Sidecar container +## The provided filebeat configuration is for Artifactory logs. 
It assumes you have a logstash installed and configured properly. +filebeat: + enabled: false + name: artifactory-filebeat + image: + repository: "docker.elastic.co/beats/filebeat" + version: 7.9.2 + logstashUrl: "logstash:5044" + + livenessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + filebeat test output + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + + resources: {} +# requests: +# memory: "100Mi" +# cpu: "100m" +# limits: +# memory: "100Mi" +# cpu: "100m" + + filebeatYml: | + logging.level: info + path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat + name: artifactory-filebeat + queue.spool: ~ + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.artifactory.persistence.mountPath }}/log/*.log + fields: + service: "jfrt" + log_type: "artifactory" + output: + logstash: + hosts: ["{{ .Values.filebeat.logstashUrl }}"] + +## Allows to add additional kubernetes resources +## Use --- as a separator between multiple resources +## For an example, refer - https://github.com/jfrog/log-analytics-prometheus/blob/master/artifactory-values.yaml +additionalResources: | diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/ci/default-values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/ci/default-values.yaml new file mode 100644 index 000000000..86355d3b3 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/ci/default-values.yaml @@ -0,0 +1,7 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. +artifactory: + databaseUpgradeReady: true + + # To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release + postgresql: + postgresqlPassword: password diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/logo/jcr-logo.png b/charts/artifactory-jcr/artifactory-jcr/3.4.000/logo/jcr-logo.png new file mode 100644 index 000000000..e0ed038ca Binary files /dev/null and b/charts/artifactory-jcr/artifactory-jcr/3.4.000/logo/jcr-logo.png differ diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/questions.yml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/questions.yml new file mode 100644 index 000000000..9cde42870 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/questions.yml @@ -0,0 +1,271 @@ +questions: +# Advance Settings +- variable: artifactory.artifactory.masterKey + default: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + description: "Artifactory master key. 
For security reasons, we strongly recommend you generate your own master key using this command: 'openssl rand -hex 32'" + type: string + label: Artifactory master key + group: "Security Settings" + +# Container Images +- variable: defaultImage + default: true + description: "Use default Docker image" + label: Use Default Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: artifactory.artifactory.image.repository + default: "docker.bintray.io/jfrog/artifactory-jcr" + description: "JFrog Container Registry image name" + type: string + label: JFrog Container Registry Image Name + - variable: artifactory.artifactory.image.version + default: "7.6.3" + description: "JFrog Container Registry image tag" + type: string + label: JFrog Container Registry Image Tag + - variable: artifactory.imagePullSecrets + description: "Image Pull Secret" + type: string + label: Image Pull Secret + +# Services and LoadBalancing Settings +- variable: artifactory.ingress.enabled + default: false + description: "Expose app using Layer 7 Load Balancer - ingress" + type: boolean + label: Expose app using Layer 7 Load Balancer + show_subquestion_if: true + group: "Services and Load Balancing" + required: true + subquestions: + - variable: artifactory.ingress.hosts[0] + default: "xip.io" + description: "Hostname to your artifactory installation" + type: hostname + required: true + label: Hostname + +# Nginx Settings +- variable: artifactory.nginx.enabled + default: true + description: "Enable nginx server" + type: boolean + label: Enable Nginx Server + group: "Services and Load Balancing" + required: true + show_if: "artifactory.ingress.enabled=false" +- variable: artifactory.nginx.service.type + default: "LoadBalancer" + description: "Nginx service type" + type: enum + required: true + label: Nginx Service Type + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" + options: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" +- variable: artifactory.nginx.service.loadBalancerIP + default: "" + description: "Provide Static IP to configure with Nginx" + type: string + label: Config Nginx LoadBalancer IP + show_if: "artifactory.nginx.enabled=true&&artifactory.nginx.service.type=LoadBalancer&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" +- variable: artifactory.nginx.tlsSecretName + default: "" + description: "Provide SSL Secret name to configure with Nginx" + type: string + label: Config Nginx SSL Secret + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" +- variable: artifactory.nginx.customArtifactoryConfigMap + default: "" + description: "Provide configMap name to configure Nginx with custom `artifactory.conf`" + type: string + label: ConfigMap for Nginx Artifactory Config + show_if: "artifactory.nginx.enabled=true&&artifactory.ingress.enabled=false" + group: "Services and Load Balancing" + +# Database Settings +- variable: artifactory.postgresql.enabled + default: true + description: "Enable PostgreSQL" + type: boolean + required: true + label: Enable PostgreSQL + group: "Database Settings" + show_subquestion_if: true + subquestions: + - variable: artifactory.postgresql.postgresqlPassword + default: "" + description: "PostgreSQL password" + type: password + required: true + label: PostgreSQL Password + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=true" + - variable: 
artifactory.postgresql.persistence.size + default: 20Gi + description: "PostgreSQL persistent volume size" + type: string + label: PostgreSQL Persistent Volume Size + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.persistence.storageClass + default: "" + description: "If undefined or null, uses the default StorageClass. Defaults to null" + type: storageclass + label: Default StorageClass for PostgreSQL + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.requests.cpu + default: "200m" + description: "PostgreSQL initial cpu request" + type: string + label: PostgreSQL Initial CPU Request + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.requests.memory + default: "500Mi" + description: "PostgreSQL initial memory request" + type: string + label: PostgreSQL Initial Memory Request + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.limits.cpu + default: "1" + description: "PostgreSQL cpu limit" + type: string + label: PostgreSQL CPU Limit + show_if: "artifactory.postgresql.enabled=true" + - variable: artifactory.postgresql.resources.limits.memory + default: "1Gi" + description: "PostgreSQL memory limit" + type: string + label: PostgreSQL Memory Limit + show_if: "artifactory.postgresql.enabled=true" +- variable: artifactory.database.type + default: "postgresql" + description: "External database type (postgresql, mysql, oracle or mssql)" + type: enum + required: true + label: External Database Type + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" + options: + - "postgresql" + - "mysql" + - "oracle" + - "mssql" +- variable: artifactory.database.url + default: "" + description: "External database URL. 
If you set the url, leave host and port empty" + type: string + label: External Database URL + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.host + default: "" + description: "External database hostname" + type: string + label: External Database Hostname + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.port + default: "" + description: "External database port" + type: string + label: External Database Port + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.user + default: "" + description: "External database username" + type: string + label: External Database Username + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" +- variable: artifactory.database.password + default: "" + description: "External database password" + type: password + label: External Database Password + group: "Database Settings" + show_if: "artifactory.postgresql.enabled=false" + +# Advance Settings +- variable: artifactory.advancedOptions + default: false + description: "Show advanced configurations" + label: Show Advanced Configurations + type: boolean + show_subquestion_if: true + group: "Advanced Options" + subquestions: + - variable: artifactory.artifactory.primary.resources.requests.cpu + default: "500m" + description: "Artifactory primary node initial cpu request" + type: string + label: Artifactory Primary Node Initial CPU Request + - variable: artifactory.artifactory.primary.resources.requests.memory + default: "1Gi" + description: "Artifactory primary node initial memory request" + type: string + label: Artifactory Primary Node Initial Memory Request + - variable: artifactory.artifactory.primary.javaOpts.xms + default: "1g" + description: "Artifactory primary node java Xms size" + type: string + label: Artifactory Primary Node Java Xms Size + - variable: artifactory.artifactory.primary.resources.limits.cpu + default: "2" + description: "Artifactory primary node cpu limit" + type: string + label: Artifactory Primary Node CPU Limit + - variable: artifactory.artifactory.primary.resources.limits.memory + default: "4Gi" + description: "Artifactory primary node memory limit" + type: string + label: Artifactory Primary Node Memory Limit + - variable: artifactory.artifactory.primary.javaOpts.xmx + default: "4g" + description: "Artifactory primary node java Xmx size" + type: string + label: Artifactory Primary Node Java Xmx Size + - variable: artifactory.artifactory.node.resources.requests.cpu + default: "500m" + description: "Artifactory member node initial cpu request" + type: string + label: Artifactory Member Node Initial CPU Request + - variable: artifactory.artifactory.node.resources.requests.memory + default: "2Gi" + description: "Artifactory member node initial memory request" + type: string + label: Artifactory Member Node Initial Memory Request + - variable: artifactory.artifactory.node.javaOpts.xms + default: "1g" + description: "Artifactory member node java Xms size" + type: string + label: Artifactory Member Node Java Xms Size + - variable: artifactory.artifactory.node.resources.limits.cpu + default: "2" + description: "Artifactory member node cpu limit" + type: string + label: Artifactory Member Node CPU Limit + - variable: artifactory.artifactory.node.resources.limits.memory + default: "4Gi" + description: "Artifactory member node memory limit" + type: string + label: Artifactory Member 
Node Memory Limit + - variable: artifactory.artifactory.node.javaOpts.xmx + default: "4g" + description: "Artifactory member node java Xmx size" + type: string + label: Artifactory Member Node Java Xmx Size + +# Internal Settings +- variable: installerInfo + default: '\{\"productId\": \"RancherHelm_artifactory-jcr/7.6.3\", \"features\": \[\{\"featureId\": \"Partner/ACC-007246\"\}\]\}' + type: string + group: "Internal Settings (Do not modify)" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.lock b/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.lock new file mode 100644 index 000000000..299d8cefe --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: artifactory + repository: https://charts.jfrog.io/ + version: 11.7.4 +digest: sha256:a4c52f49f154be6434a9a37474eee556de8d97a487be9dec923124a64651aac8 +generated: "2021-01-04T14:56:17.66958+05:30" diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.yaml new file mode 100644 index 000000000..4ad868b8a --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: artifactory + version: 11.7.4 + repository: https://charts.jfrog.io/ diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/templates/NOTES.txt b/charts/artifactory-jcr/artifactory-jcr/3.4.000/templates/NOTES.txt new file mode 100644 index 000000000..035bf8417 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/templates/NOTES.txt @@ -0,0 +1 @@ +Congratulations. You have just deployed JFrog Container Registry! diff --git a/charts/artifactory-jcr/artifactory-jcr/3.4.000/values.yaml b/charts/artifactory-jcr/artifactory-jcr/3.4.000/values.yaml new file mode 100644 index 000000000..250485d27 --- /dev/null +++ b/charts/artifactory-jcr/artifactory-jcr/3.4.000/values.yaml @@ -0,0 +1,75 @@ +# Default values for artifactory-jcr. +# This is a YAML-formatted file. + +# Beware when changing values here. You should know what you are doing! +# Access the values with {{ .Values.key.subkey }} + +# This chart is based on the main artifactory chart with some customizations. +# See all supported configuration keys in https://github.com/jfrog/charts/tree/master/stable/artifactory + +## All values are under the 'artifactory' sub chart. +artifactory: + + ## Artifactory + ## See full list of supported Artifactory options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + artifactory: + ## Default tag is from the artifactory sub-chart in the requirements.yaml + image: + registry: docker.bintray.io + repository: jfrog/artifactory-jcr + # tag: + + ## Uncomment the following resources definitions or pass them from command line + ## to control the cpu and memory resources allocated by the Kubernetes cluster + resources: {} + # requests: + # memory: "1Gi" + # cpu: "500m" + # limits: + # memory: "4Gi" + # cpu: "1" + ## The following Java options are passed to the java process running Artifactory. + ## You should set them according to the resources set above. + ## IMPORTANT: Make sure resources.limits.memory is at least 1G more than Xmx. 
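+ ## Illustrative sizing sketch (hypothetical numbers, consistent with the commented hints above): with javaOpts.xmx set to "3g", keep resources.limits.memory at "4Gi" or higher so the limit stays at least 1G above Xmx. + # resources: + # limits: + # memory: "4Gi" + # javaOpts: + # xmx: "3g"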
+ javaOpts: {} + # xms: "1g" + # xmx: "3g" + # other: "" + + installer: + platform: jcr-helm + + installerInfo: '{"productId": "Helm_artifactory-jcr/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}' + ## Nginx + ## See full list of supported Nginx options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + nginx: + enabled: true + tlsSecretName: "" + service: + type: LoadBalancer + + ## Ingress + ## See full list of supported Ingress options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + ingress: + enabled: false + tls: + + ## PostgreSQL + ## See list of supported postgresql options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + ## Configuration values for the PostgreSQL dependency sub-chart + ## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md + postgresql: + enabled: true + + ## This key is required for upgrades to protect old PostgreSQL chart's breaking changes. + databaseUpgradeReady: "yes" + + ## If NOT using the PostgreSQL in this chart (artifactory.postgresql.enabled=false), + ## specify custom database details here or leave empty and Artifactory will use embedded derby. + ## See full list of database options and documentation in artifactory chart: https://github.com/jfrog/charts/tree/master/stable/artifactory + # database: + + +## Enable the PostgreSQL sub chart +postgresql: + enabled: true diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/.helmignore b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
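+# Illustrative example (not part of this chart's ignore list): negation re-includes a file that an earlier pattern excluded, e.g. +# *.tpl +# !templates/_helpers.tpl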
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/Chart.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/Chart.yaml new file mode 100644 index 000000000..10b3cf005 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/Chart.yaml @@ -0,0 +1,18 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: citrix-adc-istio-ingress-gateway +apiVersion: v1 +appVersion: 1.2.1 +description: A Helm chart for Citrix ADC as Ingress Gateway installation in Istio + Service Mesh on Kubernetes platform +home: https://www.citrix.com +icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png +maintainers: +- email: dhiraj.gedam@citrix.com + name: dheerajng +- email: subash.dangol@citrix.com + name: subashd +name: citrix-adc-istio-ingress-gateway +sources: +- https://github.com/citrix/citrix-istio-adaptor +version: 1.2.100 diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/README.md b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/README.md new file mode 100644 index 000000000..0362e7b1f --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/README.md @@ -0,0 +1,220 @@ +# Deploy Citrix ADC as an Ingress Gateway in Istio environment using Helm charts + +Citrix Application Delivery Controller (ADC) can be deployed as an Istio Ingress Gateway to control the ingress traffic to Istio service mesh. + +# Table of Contents +1. [TL; DR;](#tldr) +2. [Introduction](#introduction) +3. [Deploy Citrix ADC VPX or MPX as an Ingress Gateway](#deploy-citrix-adc-vpx-or-mpx-as-an-ingress-gateway) +4. [Deploy Citrix ADC CPX as an Ingress Gateway](#deploy-citrix-adc-cpx-as-an-ingress-gateway) +5. [Using Existing Certificates to deploy Citrix ADC as an Ingress Gateway](#using-existing-certificates-to-deploy-citrix-adc-as-an-ingress-gateway) +6. [Segregating traffic with multiple Ingress Gateways](#segregating-traffic-with-multiple-ingress-gateways) +7. [Visualizing statistics of Citrix ADC Ingress Gateway with Metrics Exporter](#visualizing-statistics-of-citrix-adc-ingress-gateway-with-metrics-exporter) +8. [Exposing services running on non-HTTP ports](#exposing-services-running-on-non-http-ports) +9. [Citrix ADC as Ingress Gateway: a sample deployment](#citrix-adc-as-ingress-gateway-a-sample-deployment) +10. [Uninstalling the Helm chart](#uninstalling-the-helm-chart) +11. 
[Configuration Parameters](#configuration-parameters) + + +## TL; DR; + +### To deploy Citrix ADC VPX or MPX as an Ingress Gateway: + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install citrix-adc-istio-ingress-gateway citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES --set istioAdaptor.netscalerUrl=https://[:port] --set istioAdaptor.vserverIP= + +### To deploy Citrix ADC CPX as an Ingress Gateway: + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install citrix-adc-istio-ingress-gateway citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES --set citrixCPX=true + + +## Introduction + +This chart deploys Citrix ADC VPX, MPX, or CPX as an Ingress Gateway in the Istio service mesh using the Helm package manager. For detailed information on different deployment options, see [Deployment Architecture](https://github.com/citrix/citrix-istio-adaptor/blob/master/docs/architecture.md). + +### Prerequisites + +The following prerequisites are required for deploying Citrix ADC as an Ingress Gateway in Istio service mesh: + +- Ensure that **Istio version 1.3.0** is installed +- Ensure that Helm with version 3.x is installed. Follow this [step](https://github.com/citrix/citrix-helm-charts/blob/master/Helm_Installation_version_3.md) to install the same. +- Ensure that your cluster has Kubernetes version 1.14.0 or later and the `admissionregistration.k8s.io/v1beta1` API is enabled +- **For deploying Citrix ADC VPX or MPX as an Ingress gateway:** + + Create a Kubernetes secret for the Citrix ADC user name and password using the following command: + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + +You can verify the API by using the following command: + + kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 + +The following output indicates that the API is enabled: + + admissionregistration.k8s.io/v1beta1 + +- **Important Note:** For deploying Citrix ADC VPX or MPX as ingress gateway, you should establish the connectivity between Citrix ADC VPX or MPX and cluster nodes. This connectivity can be established by configuring routes on Citrix ADC as mentioned [here](https://github.com/citrix/citrix-k8s-ingress-controller/blob/master/docs/network/staticrouting.md) or by deploying [Citrix Node Controller](https://github.com/citrix/citrix-k8s-node-controller). + + +## Deploy Citrix ADC VPX or MPX as an Ingress Gateway + + To deploy Citrix ADC VPX or MPX as an Ingress Gateway in the Istio service mesh, do the following step. In this example, release name is specified as `citrix-adc-istio-ingress-gateway` and namespace as `citrix-system`. + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install citrix-adc-istio-ingress-gateway citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,istioAdaptor.netscalerUrl=https://[:port],istioAdaptor.vserverIP= + +## Deploy Citrix ADC CPX as an Ingress Gateway + + To deploy Citrix ADC CPX as an Ingress Gateway, do the following step. In this example, release name is specified as `my-release` and namespace is used as `citrix-system`. 
+ + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,citrixCPX=true + + +## Using Existing Certificates to deploy Citrix ADC as an Ingress Gateway + +You may want to use the existing certificate and key for authenticating access to an application using Citrix ADC Ingress Gateway. In that case, you can create a Kubernetes secret from the existing certificate and key. You can mount the Kubernetes secret as data volumes in Citrix ADC Ingress Gateway. + +To create a Kubernetes secret using an existing key named `test_key.pem` and a certificate named `test.pem`, use the following command: + + kubectl create -n citrix-system secret tls citrix-ingressgateway-certs --key test_key.pem --cert test.pem + +Note: Ensure that Kubernetes secret is created in the same namespace where Citrix ADC Ingress Gateway is deployed. + +To deploy Citrix ADC VPX or MPX with secret volume, do the following step: + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,istioAdaptor.netscalerUrl=https://[:port],istioAdaptor.vserverIP=,ingressGateway.secretVolumes[0].name=test-ingressgateway-certs,ingressGateway.secretVolumes[0].secretName=test-ingressgateway-certs,ingressGateway.secretVolumes[0].mountPath=/etc/istio/test-ingressgateway-certs + +To deploy Citrix ADC CPX with secret volume, do the following step: + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,citrixCPX=true,ingressGateway.secretVolumes[0].name=test-ingressgateway-certs,ingressGateway.secretVolumes[0].secretName=test-ingressgateway-certs,ingressGateway.secretVolumes[0].mountPath=/etc/istio/test-ingressgateway-certs + +## Segregating traffic with multiple Ingress Gateways + +You can deploy multiple Citrix ADC Ingress Gateway devices and segregate traffic to various deployments in the Istio service mesh. This can be achieved with *custom labels*. By default, Citrix ADC Ingress Gateway service comes up with the `app: citrix-ingressgateway` label. This label is used as a selector while deploying the Ingress Gateway or virtual service resources. If you want to deploy Ingress Gateway with the custom label, you can do it using the `ingressGateway.label` option in the Helm chart. 
+ +To deploy Citrix ADC CPX Ingress Gateway with the label `my_custom_ingressgateway`, do the following step: + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,citrixCPX=true,ingressGateway.lightWeightCPX=NO,ingressGateway.label=my_custom_ingressgateway + +To deploy Citrix ADC VPX or MPX as an Ingress Gateway with the label `my_custom_ingressgateway`, do the following step: + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,istioAdaptor.netscalerUrl=https://[:port],istioAdaptor.vserverIP=,ingressGateway.label=my_custom_ingressgateway + +## Visualizing statistics of Citrix ADC Ingress Gateway with Metrics Exporter + +By default, [Citrix ADC Metrics Exporter](https://github.com/citrix/citrix-adc-metrics-exporter) is also deployed along with Citrix ADC Ingress Gateway. Citrix ADC Metrics Exporter fetches statistical data from Citrix ADC and exports it to Prometheus running in Istio service mesh. When you add Prometheus as a data source in Grafana, you can visualize this statistical data in the Grafana dashboard. + +Metrics Exporter requires the IP address of Citrix ADC CPX or VPX Ingress Gateway. It is retrieved from the value specified for `istioAdaptor.netscalerUrl`. + +When Citrix ADC CPX is deployed as Ingress Gateway, Metrics Exporter runs along with Citrix CPX Ingress Gateway in the same pod and specifying IP address is optional. + +To deploy Citrix ADC as Ingress Gateway without Metrics Exporter, set the value of `metricExporter.required` as false. + + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install citrix-adc-istio-ingress-gateway citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,istioAdaptor.netscalerUrl=https://[:port],istioAdaptor.vserverIP=,metricExporter.required=false + +"Note:" To remotely access telemetry addons such as Prometheus and Grafana, see [Remotely Accessing Telemetry Addons](https://istio.io/docs/tasks/telemetry/gateways/). + +## Exposing services running on non-HTTP ports + +By default, services running on HTTP ports (80 & 443) are exposed through Citrix ADC Ingress Gateway. Similarly, you can expose services that are deployed on non-HTTP ports through the Citrix ADC Ingress Gateway device. + +To deploy Citrix ADC MPX or VPX, and expose a service running on a TCP port, do the following step. + +In this example, a service running on TCP port 5000 is exposed using port 10000 on Citrix ADC. + + kubectl create secret generic nslogin --from-literal=username= --from-literal=password= -n citrix-system + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,istioAdaptor.netscalerUrl=https://[:port],istioAdaptor.vserverIP=,ingressGateway.tcpPort[0].name=tcp1,ingressGateway.tcpPort[0].port=10000,ingressGateway.tcpPort[0].targetPort=5000 + + To deploy Citrix ADC CPX and expose a service running on a TCP port, do the following step. 
+ In this example, port 10000 on the Citrix ADC CPX instance is exposed using TCP port 30000 (node port configuration) on the host machine. + + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install my-release citrix/citrix-adc-istio-ingress-gateway --namespace citrix-system --set ingressGateway.EULA=YES,citrixCPX=true,ingressGateway.tcpPort[0].name=tcp1,ingressGateway.tcpPort[0].nodePort=30000,ingressGateway.tcpPort[0].port=10000,ingressGateway.tcpPort[0].targetPort=5000 + + +## Citrix ADC as Ingress Gateway: a sample deployment + +A sample deployment of Citrix ADC as an Ingress gateway for the Bookinfo application is provided [here](https://github.com/citrix/citrix-helm-charts/tree/master/examples/citrix-adc-in-istio). + +## Uninstalling the Helm chart + +To uninstall or delete a chart with release name as `my-release`, do the following step. + + helm delete my-release + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration parameters + +The following table lists the configurable parameters in the Helm chart and their default values. + + +| Parameter | Description | Default | Optional/Mandatory | +|--------------------------------|-------------------------------|---------------------------|---------------------------| +| `citrixCPX` | Citrix ADC CPX | FALSE | Mandatory for Citrix ADC CPX | +| `istioAdaptor.image` | Image of the Citrix Istio-adaptor container |quay.io/citrix/citrix-istio-adaptor| Mandatory| +| `istioAdaptor.tag` | Tag of the Istio adaptor image | 1.2.0 | Mandatory| +| `istioAdaptor.imagePullPolicy` | Image pull policy for Istio-adaptor | IfNotPresent | Optional| +| `istioAdaptor.vserverIP` | Virtual server IP address on Citrix ADC (Mandatory if citrixCPX=false) | null | Mandatory for Citrix ADC MPX or VPX| +| `istioAdaptor.netscalerUrl` | URL or IP address of the Citrix ADC which Istio-adaptor configures (Mandatory if citrixCPX=false)| null |Mandatory for Citrix ADC MPX or VPX| +| `istioAdaptor.secureConnect` | If this value is set to true, Istio-adaptor establishes secure gRPC channel with Istio Pilot | TRUE | Optional| +| `istioAdaptor.netProfile ` | Network profile name used by [CNC](https://github.com/citrix/citrix-k8s-node-controller) to configure Citrix ADC VPX or MPX which is deployed as Ingress Gateway | null | Optional| +| `istioAdaptor.coeURL` | Name of [Citrix Observability Exporter](https://github.com/citrix/citrix-observability-exporter) Service in the form of "." | null | Optional| +| `istioAdaptor.ADMIP ` | Citrix Application Delivery Management (ADM) IP address | NIL | Mandatory for Citrix ADC CPX | +| `istioAdaptor.ADMFingerPrint ` | Citrix Application Delivery Management (ADM) Finger Print. For more information, see [this](https://docs.citrix.com/en-us/citrix-application-delivery-management-service/application-analytics-and-management/service-graph.html) | NIL | Optional| +| `ingressGateway.image` | Image of Citrix ADC CPX designated to run as Ingress Gateway |quay.io/citrix/citrix-k8s-cpx-ingress| Mandatory for Citrix ADC CPX | +| `ingressGateway.tag` | Version of Citrix ADC CPX | 13.0-47.22 | Mandatory for Citrix ADC CPX | +| `ingressGateway.imagePullPolicy` | Image pull policy | IfNotPresent | Optional| +| `ingressGateway.EULA` | End User License Agreement(EULA) terms and conditions. If yes, then user agrees to EULA terms and conditions. 
| NO | Mandatory for Citrix ADC CPX +| `ingressGateway.mgmtHttpPort` | Management port of the Citrix ADC CPX | 9080 | Optional| +| `ingressGateway.mgmtHttpsPort` | Secure management port of Citrix ADC CPX | 9443 | Optional| +| `ingressGateway.httpNodePort` | Port on host machine which is used to expose HTTP port (80) of Citrix ADC CPX | 30180 |Optional| +| `ingressGateway.httpsNodePort` | Port on host machine which is used to expose HTTPS port (443) of Citrix ADC CPX | 31443 |Optional| +| `ingressGateway.secretVolume` | A map of user defined volumes to be mounted using Kubernetes secrets | null |Optional| +| `ingressGateway.licenseServerPort` | Citrix ADM port if a non-default port is used | 27000 | Optional| +| `ingressGateway.label` | Custom label for the Ingress Gateway service | citrix-ingressgateway |Optional| +| `ingressGateway.tcpPort` | For exposing multiple TCP ingress | NIL |Optional| +| `istioPilot.name` | Name of the Istio Pilot service | istio-pilot |Optional| +| `istioPilot.namespace` | Namespace where Istio Pilot is running | istio-system |Optional| +| `istioPilot.secureGrpcPort` | Secure GRPC port where Istio Pilot is listening (default setting) | 15011 |Optional| +| `istioPilot.insecureGrpcPort` | Insecure GRPC port where Istio Pilot is listening | 15010 |Optional| +| `istioPilot.SAN` | Subject alternative name for Istio Pilot which is the secure production identity framework for everyone (SPIFFE) ID of Istio Pilot | spiffe://cluster.local/ns/istio-system/sa/istio-pilot-service-account |Optional| +| `metricExporter.required` | Metrics exporter for Citrix ADC | TRUE |Optional| +| `metricExporter.image` | Image of the Citrix ADC Metrics Exporter | quay.io/citrix/citrix-adc-metrics-exporter |Optional| +| `metricExporter.version` | Version of the Citrix ADC Metrics Exporter image | 1.4.0 |Optional| +| `metricExporter.port` | Port over which Citrix ADC Metrics Exporter collects metrics of Citrix ADC. | 8888 |Optional| +| `metricExporter.secure` | Enables collecting metrics over TLS | YES |Optional| +| `metricExporter.logLevel` | Level of logging in Citrix ADC Metrics Exporter. Possible values are: DEBUG, INFO, WARNING, ERROR, CRITICAL | ERROR |Optional| +| `metricExporter.imagePullPolicy` | Image pull policy for Citrix ADC Metrics Exporter | IfNotPresent |Optional| + diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/app-readme.md b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/app-readme.md new file mode 100644 index 000000000..15e4cb6fe --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/app-readme.md @@ -0,0 +1,18 @@ +# Citrix ADC as an Ingress Gateway for Istio + +An [Istio](https://istio.io/) ingress gateway acts as an entry point for the incoming traffic and secures and controls access to the service mesh. It also performs routing and load balancing. Citrix ADC [CPX](https://docs.citrix.com/en-us/citrix-adc-cpx), MPX, or [VPX](https://docs.citrix.com/en-us/citrix-adc.html), can be deployed as an ingress gateway to the Istio service mesh. + +### Prerequisites + +The following prerequisites are required for deploying Citrix ADC as an Ingress Gateway in Istio service mesh: + +- Ensure that **Istio** is enabled. +- Ensure that your cluster has Kubernetes version 1.14.0 or later. +- Ensure to create secret named **nslogin** with username and password in same namespace in case of VPX/MPX . Choose the **Resources > Secrets** in the navigation bar. 
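As an illustration only, the **nslogin** secret mentioned above can also be created from a manifest instead of through the UI; the credentials below are placeholders and the namespace is assumed to be the one where the gateway will be deployed:

```
apiVersion: v1
kind: Secret
metadata:
  name: nslogin
  namespace: citrix-system
type: Opaque
stringData:
  # Replace with the Citrix ADC VPX/MPX management credentials
  username: <adc-username>
  password: <adc-password>
```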
+ +### Important NOTE: +- Follow this [link](https://github.com/citrix/citrix-helm-charts/blob/master/examples/citrix-adc-in-istio/README.md +) to deploy Citrix ADC as an ingress gateway for application. +- For deploying Citrix ADC VPX or MPX as ingress gateway, you should establish the connectivity between Citrix ADC VPX or MPX and cluster nodes. This connectivity can be established by configuring routes on Citrix ADC as mentioned [here](https://github.com/citrix/citrix-k8s-ingress-controller/blob/master/docs/network/staticrouting.md) or by deploying [Citrix Node Controller](https://github.com/citrix/citrix-k8s-node-controller). + +This catalog deploys Citrix ADC VPX, MPX, or CPX as an Ingress Gateway in the Istio service mesh. For detailed information on various deployment options,checkout this [link](https://github.com/citrix/citrix-istio-adaptor). diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/questions.yml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/questions.yml new file mode 100644 index 000000000..1ee110b95 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/questions.yml @@ -0,0 +1,300 @@ +questions: +- variable: citrixCPX + required: true + type: boolean + default: true + description: "Set true to use Citrix ADC CPX as ingress device. Set false to use VPX/MPX as ingress device" + label: citrixCPX + group: "Deployment Settings" +- variable: secrets.name + required: true + type: string + default: "nslogin" + description: "Ensure to create nslogin secret in same namespace" + show_if: "citrixCPX=false" + group: "nslogin Settings" +- variable: istioAdaptor.image + required: true + type: string + default: "quay.io/citrix/citrix-istio-adaptor" + label: istioAdaptor Image + description: "Istio-adaptor Image to be used" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.tag + required: true + type: string + default: "1.2.1" + label: istioAdaptor tag + group: "Istio-adaptor Settings" +- variable: istioAdaptor.imagePullPolicy + required: true + type: enum + default: IfNotPresent + label: istioAdaptor imagePullPolicy + description: "Istio-adaptor Image pull policy" + options: + - "Always" + - "IfNotPresent" + - "Never" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.netscalerUrl + required: true + type: string + default: null + label: istioAdaptor netscalerUrl + description: "URL or IP address of the Citrix ADC which Istio-adaptor configures" + show_if: "citrixCPX=false" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.secureConnect + type: boolean + default: true + label: istioAdaptor secureConnect + description: "If this value is set to true, Istio-adaptor establishes secure gRPC channel with Istio Pilot" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.vserverIP + required: true + type: string + label: istioAdaptor vserverIP + show_if: "citrixCPX=false" + descriptions: "Virtual server IP address on Citrix ADC" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.netProfile + type: string + label: istioAdaptor netProfile + description: "profile name used by CNC to configure VPX/MPX" + show_if: "citrixCPX=false" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.ADMIP + type: string + label: istioAdaptor ADMIP + description: "Citrix Application Delivery Management (ADM) IP address" + group: "Istio-adaptor Settings" +- variable: istioAdaptor.ADMFingerPrint + type: string + label: istioAdaptor 
ADMFingerPrint + description: "Citrix Application Delivery Management (ADM) Finger Print." + group: "Istio-adaptor Settings" +- variable: istioAdaptor.coeURL + type: string + label: istioAdaptor coeURL + description: "Name of Citrix Observability Exporter Service" + group: "Istio-adaptor Settings" +- variable: istioPilot.name + required: true + type: string + default: istio-pilot + label: istio-pilot name + group: "istio-pilot Settings" + description: "Name of the Istio Pilot service" +- variable: istioPilot.namespace + required: true + type: string + default: istio-system + label: istio-pilot namespace + description: "Namespace where Istio Pilot is running" + group: "istio-pilot Settings" +- variable: istioPilot.secureGrpcPort + required: true + type: int + default: 15011 + label: istio-pilot secureGrpcPort + show_if: "istioAdaptor.secureConnect=true" + description: "Secure GRPC port where Istio Pilot is listening" + group: "istio-pilot Settings" +- variable: istioPilot.insecureGrpcPort + required: true + type: int + default: 15010 + show_if: "istioAdaptor.secureConnect=false" + label: istio-pilot insecureGrpcPort + description: "Insecure GRPC port where Istio Pilot is listening" + group: "istio-pilot Settings" +- variable: istioPilot.SAN + required: true + type: string + default: "spiffe://cluster.local/ns/istio-system/sa/istio-pilot-service-account" + label: istio-pilot SAN + description: "Subject alternative name for Istio Pilot which is (SPIFFE) ID of Istio Pilot" + show_if: "istioAdaptor.secureConnect=true" + group: "istio-pilot Settings" +- variable: ingressGateway.image + required: true + type: string + default: "quay.io/citrix/citrix-k8s-cpx-ingress" + label: ingressGateway Image + description: "ingressGateway image to be used" + group: "ingressGateway Settings" +- variable: ingressGateway.tag + required: true + type: string + default: "13.0-47.22" + label: ingressGateway tag + group: "ingressGateway Settings" +- variable: ingressGateway.imagePullPolicy + required: true + type: enum + default: IfNotPresent + label: ingressGateway imagePullPolicy + description: Ingress-gateway Image pull policy + group: "ingressGateway Settings" + options: + - "Always" + - "IfNotPresent" + - "Never" +- variable: ingressGateway.EULA + required: true + type: enum + description: "End user license agreement (read EULA before accepting it yes)" + label: ingressGateway EULA + options: + - "YES" + - "NO" + group: "ingressGateway Settings" +- variable: ingressGateway.mgmtHttpPort + required: true + type: int + default: 10080 + label: ingressGateway mgmtHttpPort + description: "Management port of the Citrix ADC CPX" + show_if: "citrixCPX=true" + group: "ingressGateway Settings" +- variable: ingressGateway.mgmtHttpsPort + required: true + type: int + default: 10443 + show_if: "citrixCPX=true" + label: ingressGateway mgmtHttpsPort + description: "Secure management port of Citrix ADC CPX" + group: "ingressGateway Settings" +- variable: ingressGateway.httpNodePort + required: true + type: int + default: 30180 + show_if: "citrixCPX=true" + label: ingressGateway httpNodePort + description: "Port on host machine which is used to expose HTTP port of Citrix ADC CPX" + group: "ingressGateway Settings" +- variable: ingressGateway.httpsNodePort + required: true + type: int + default: 31443 + show_if: "citrixCPX=true" + label: ingressGateway httpsNodePort + description: "Port on host machine which is used to expose HTTPS port of Citrix ADC CPX" + group: "ingressGateway Settings" +- variable: 
ingressGateway.exposeMutipleApps + required: true + type: boolean + default: false + description: "By default, only one service is exposed via ingress gateway. To expose another service, select it TRUE, and then specify a set of secret, volume name, mount path in subsequent fields" + label: exposeMutipleApps + group: "ingressGateway Settings" +- variable: ingressGateway.secretVolumes[0].name + required: true + type: string + show_if: "ingressGateway.exposeMutipleApps=true" + label: ingressGateway secretVolumes name + group: "ingressGateway Settings" +- variable: ingressGateway.secretVolumes[0].secretName + required: true + type: string + show_if: "ingressGateway.exposeMutipleApps=true" + label: ingressGateway secretVolumes secretName + description: "user defined volumes to be mounted using Kubernetes secrets name" + group: "ingressGateway Settings" +- variable: ingressGateway.secretVolumes[0].mountPath + required: true + type: string + show_if: "ingressGateway.exposeMutipleApps=true" + label: ingressGateway secretVolumes mountPath + group: "ingressGateway Settings" +- variable: ingressGateway.licenseServerPort + type: int + default: 27000 + label: ingressGateway licenseServerPort + description: "Citrix ADM port if a non-default port is used" + group: "ingressGateway Settings" +- variable: ingressGateway.label + required: true + type: string + default: "citrix-ingressgateway" + label: ingressGateway label + description: "Custom label for the Ingress Gateway service" + group: "ingressGateway Settings" +- variable: ingressGateway.exposeNonHttpService + required: true + type: boolean + default: false + description: "By default, gateway is configured to expose HTTP(S) services. To expose non-HTTP services, select exposeNonHttpService to True, and then specify a set of port, port-name, target-port, nodeport (if applicable) in subsequent field." 
+ label: exposeNonHttpService + group: "ingressGateway Settings" +- variable: ingressGateway.tcpPort[0].name + required: true + type: string + default: + label: Services runing on tcpPort name + show_if: "ingressGateway.exposeNonHttpService=true" + group: "ingressGateway Settings" +- variable: ingressGateway.tcpPort[0].nodePort + required: true + type: int + min: 30000 + max: 32767 + label: Citrix ADC CPX exposed using nodePort + show_if: "citrixCPX=true && ingressGateway.exposeNonHttpService=true" + description: "NodePort (to set explicitly, choose port between 30000-32767)" + group: "ingressGateway Settings" +- variable: ingressGateway.tcpPort[0].port + required: true + type: int + label: Services exposed using Port on Citrix ADC + show_if: "ingressGateway.exposeNonHttpService=true" + group: "ingressGateway Settings" +- variable: ingressGateway.tcpPort[0].targetPort + required: true + type: int + label: Services running on targetPort + show_if: "ingressGateway.exposeNonHttpService=true" + group: "ingressGateway Settings" +- variable: metricExporter.image + required: true + type: string + default: "quay.io/citrix/citrix-adc-metrics-exporter" + label: Exporter Image + description: "Exporter Image to be used" + group: "metricExporter Settings" +- variable: metricExporter.version + required: true + type: string + default: "1.4.0" + label: metricExporter Version + group: "metricExporter Settings" +- variable: metricExporter.port + required: true + type: int + default: 8888 + label: metricExporter Port + group: "metricExporter Settings" +- variable: metricExporter.logLevel + required: true + type: enum + default: ERROR + label: metricExporter logLevel + group: "metricExporter Settings" + options: + - "DEBUG" + - "INFO" + - "WARNING" + - "ERROR" + - "TRACE" +- variable: metricExporter.imagePullPolicy + required: true + type: enum + default: IfNotPresent + label: metricExporter imagePullPolicy + description: "Exporter Image pull policy" + group: "metricExporter Settings" + options: + - "Always" + - "IfNotPresent" + - "Never" diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/_helpers.tpl b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/_helpers.tpl new file mode 100644 index 000000000..91374c7bd --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/_helpers.tpl @@ -0,0 +1,4 @@ +{{- define "exporter_nsip" -}} +{{- $match := .Values.istioAdaptor.netscalerUrl | toString | regexFind "//.*[:]*" -}} +{{- $match | trimAll ":" | trimAll "/" -}} +{{- end -}} diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/citrix-adc-ingressgateway-deployment.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/citrix-adc-ingressgateway-deployment.yaml new file mode 100644 index 000000000..fcf187f55 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/citrix-adc-ingressgateway-deployment.yaml @@ -0,0 +1,330 @@ +{{- if eq .Values.citrixCPX true }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: citrix-ingressgateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} + template: + metadata: + labels: + app: {{ 
.Values.ingressGateway.label | default "citrix-ingressgateway" }} + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + prometheus.io/port: "{{ .Values.metricExporter.port }}" + prometheus.io/scrape: "true" + spec: + volumes: + - name: nslogin + secret: + secretName: nslogin + - name: istio-certs + secret: + optional: true + secretName: istio.default + - name: citrix-ingressgateway-certs + secret: + optional: true + secretName: "citrix-ingressgateway-certs" # IMPORTANT: This secret MUST BE created before deploying gateway and ingress-gateway + - name: citrix-ingressgateway-ca-certs + secret: + optional: true + secretName: "citrix-ingressgateway-ca-certs" # IMPORTANT: This secret MUST BE created before deploying gateway and ingress-gateway + {{- range .Values.ingressGateway.secretVolumes }} + - name: {{ .name }} + secret: + secretName: {{ .secretName | quote }} + optional: true + {{- end }} + - name: cpx-conf + emptyDir: {} + containers: +{{- if eq .Values.metricExporter.required true }} + - name: exporter + image: {{ .Values.metricExporter.image }}:{{ .Values.metricExporter.version }} + imagePullPolicy: IfNotPresent + args: + - "--target-nsip=127.0.0.1" + - "--port={{ .Values.metricExporter.port }}" + - "--log-level={{ .Values.metricExporter.logLevel }}" + env: + - name: NS_USER + valueFrom: + secretKeyRef: + name: nslogin + key: username + - name: NS_PASSWORD + valueFrom: + secretKeyRef: + name: nslogin + key: password +{{- end }} + - name: istio-adaptor + image: {{ .Values.istioAdaptor.image }}:{{ .Values.istioAdaptor.tag }} + imagePullPolicy: {{ .Values.istioAdaptor.imagePullPolicy }} + args: + - -pilot-location +{{- if eq .Values.istioAdaptor.secureConnect true }} + - {{ .Values.istioPilot.name}}.{{.Values.istioPilot.namespace }}:{{ .Values.istioPilot.secureGrpcPort }} # istio-pilot.istio-system:15011 +{{- else }} + - {{ .Values.istioPilot.name}}.{{.Values.istioPilot.namespace }}:{{ .Values.istioPilot.insecureGrpcPort }} # istio-pilot.istio-system:15010 +{{- end }} + - -proxy-type + - {{ .Values.istioAdaptor.proxyType | default "router" | quote }} + - -pilot-SAN + - {{ .Values.istioPilot.SAN }} + - -netscaler-url + - "http://127.0.0.1" + - -secure-connect={{ .Values.istioAdaptor.secureConnect}} + - -adm-ip +{{- if .Values.istioAdaptor.ADMIP }} + - {{ .Values.istioAdaptor.ADMIP }} +{{- else }} + - "" +{{- end }} +{{- if .Values.istioAdaptor.coeURL }} + - -coe-url + - {{ .Values.istioAdaptor.coeURL }} +{{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: APPLICATION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app'] + securityContext: + readOnlyRootFilesystem: true + runAsUser: 32024 # UID of istio-adaptor container's user + volumeMounts: + - mountPath: /etc/certs + name: istio-certs + readOnly: true + - mountPath: /etc/nslogin + name: nslogin + readOnly: true + - mountPath: /etc/istio/ingressgateway-certs # Make sure that Gateway definition has this path mentioned in server.tls section for SIMPLE TLS + name: citrix-ingressgateway-certs + readOnly: true + - mountPath: /etc/istio/ingressgateway-ca-certs # Make sure that Gateway definition has this path mentioned in server.tls section for MUTUAL TLS + name: citrix-ingressgateway-ca-certs + readOnly: true + {{- range 
.Values.ingressGateway.secretVolumes }} + - name: {{ .name }} + mountPath: {{ .mountPath | quote }} + readOnly: true + {{- end }} + - name: citrix-ingressgateway + image: "{{ .Values.ingressGateway.image }}:{{ .Values.ingressGateway.tag }}" + imagePullPolicy: {{ .Values.ingressGateway.imagePullPolicy }} + securityContext: + privileged: true + ports: + - containerPort: 80 + - containerPort: 443 +{{- if .Values.ingressGateway.mgmtHttpPort }} + - containerPort: {{ .Values.ingressGateway.mgmtHttpPort }} +{{- end }} +{{- if .Values.ingressGateway.mgmtHttpsPort }} + - containerPort: {{ .Values.ingressGateway.mgmtHttpsPort }} +{{- end }} +{{- range .Values.ingressGateway.tcpPort }} + - containerPort: {{ .port }} +{{- end }} + volumeMounts: + - mountPath: /cpx/conf/ + name: cpx-conf + env: + - name: "EULA" + value: "{{ .Values.ingressGateway.EULA }}" + - name: "MGMT_HTTP_PORT" + value: "{{ .Values.ingressGateway.mgmtHttpPort }}" + - name: "MGMT_HTTPS_PORT" + value: "{{ .Values.ingressGateway.mgmtHttpsPort }}" + - name: "NS_CPX_LITE" + value: "{{ .Values.ingressGateway.lightWeightCPX }}" +{{- if or .Values.istioAdaptor.coeURL .Values.istioAdaptor.ADMIP }} + - name: "NS_ENABLE_NEWNSLOG" + value: "1" +{{- end }} + - name: "KUBERNETES_TASK_ID" + value: "" + - name: "LS_IP" + value: {{ .Values.istioAdaptor.ADMIP | default "" }} + - name: "LS_PORT" + value: "{{ .Values.ingressGateway.licenseServerPort}}" +{{- if .Values.istioAdaptor.ADMFingerPrint }} + - name: "NS_MGMT_SERVER" + value: {{ .Values.istioAdaptor.ADMIP }} + - name: "NS_MGMT_FINGER_PRINT" + value: {{ .Values.istioAdaptor.ADMFingerPrint | quote }} + - name: "NS_HTTP_PORT" + value: {{ .Values.ingressGateway.mgmtHttpPort | quote }} + - name: "NS_HTTPS_PORT" + value: {{ .Values.ingressGateway.mgmtHttpsPort | quote }} +{{- end }} + - name: "LOGSTREAM_COLLECTOR_IP" + value: {{ .Values.istioAdaptor.ADMIP | default ""}} + +--- +{{ else }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: citrix-ingressgateway + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} + template: + metadata: + labels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + prometheus.io/port: "{{ .Values.metricExporter.port }}" + prometheus.io/scrape: "true" + spec: + containers: +{{- if eq .Values.metricExporter.required true }} + - name: exporter + image: {{ .Values.metricExporter.image }}:{{ .Values.metricExporter.version }} + imagePullPolicy: {{ .Values.metricExporter.imagePullPolicy }} + args: + - "--target-nsip={{- include "exporter_nsip" . 
-}}" + - "--port={{ .Values.metricExporter.port }}" + - "--secure={{ .Values.metricExporter.secure | lower}}" + - "--log-level={{ .Values.metricExporter.logLevel }}" + env: + - name: NS_USER + valueFrom: + secretKeyRef: + name: nslogin + key: username + - name: NS_PASSWORD + valueFrom: + secretKeyRef: + name: nslogin + key: password +{{- end }} + - name: istio-adaptor + image: {{ .Values.istioAdaptor.image }}:{{ .Values.istioAdaptor.tag }} + imagePullPolicy: {{ .Values.istioAdaptor.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: APPLICATION_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['app'] + args: + - -pilot-location +{{- if eq .Values.istioAdaptor.secureConnect true }} + - {{ .Values.istioPilot.name}}.{{.Values.istioPilot.namespace }}:{{ .Values.istioPilot.secureGrpcPort }} # istio-pilot.istio-system:15011 +{{- else }} + - {{ .Values.istioPilot.name}}.{{.Values.istioPilot.namespace }}:{{ .Values.istioPilot.insecureGrpcPort }} # istio-pilot.istio-system:15010 +{{- end }} + - -proxy-type + - {{ .Values.istioAdaptor.proxyType | default "router" | quote }} + - -pilot-SAN + - {{ .Values.istioPilot.SAN }} + - -netscaler-url + - {{ required "Mention Citrix ADC IP/URL in https://[:port] format" .Values.istioAdaptor.netscalerUrl }} + - -vserver-ip + - {{ required "Mention Vserver IP to be configured on Citrix ADC" .Values.istioAdaptor.vserverIP }} + - -secure-connect={{ .Values.istioAdaptor.secureConnect | default true }} + # If using VPX/MPX as Ingress gateway, then specify the network profile name + # which was provided to Citrix Node Controller (CNC) +{{- if .Values.istioAdaptor.netProfile }} + - -net-profile + - {{ .Values.istioAdaptor.netProfile }} +{{- end }} + - -adm-ip + - "" +{{- if .Values.istioAdaptor.coeURL }} + - -coe-url + - {{ .Values.istioAdaptor.coeURL }} +{{- end }} + securityContext: + readOnlyRootFilesystem: true + runAsUser: 32024 # UID of istio-adaptor container's user + volumeMounts: + - mountPath: /etc/certs + name: istio-certs + readOnly: true + - mountPath: /etc/nslogin + name: nslogin + readOnly: true + - mountPath: /etc/istio/ingressgateway-certs # Make sure that Gateway definition has this path mentioned in server.tls section for SIMPLE TLS + name: citrix-ingressgateway-certs + readOnly: true + - mountPath: /etc/istio/ingressgateway-ca-certs # Make sure that Gateway definition has this path mentioned in server.tls section for MUTUAL TLS + name: citrix-ingressgateway-ca-certs + readOnly: true + {{- range .Values.ingressGateway.secretVolumes }} + - name: {{ .name }} + mountPath: {{ .mountPath | quote }} + readOnly: true + {{- end }} + volumes: + - name: nslogin + secret: + secretName: nslogin + - name: istio-certs + secret: + optional: true + secretName: istio.default + - name: citrix-ingressgateway-certs + secret: + optional: true + secretName: "citrix-ingressgateway-certs" # IMPORTANT: This secret MUST BE created before deploying gateway and ingress-gateway + - name: citrix-ingressgateway-ca-certs + secret: + optional: true + secretName: "citrix-ingressgateway-ca-certs" # IMPORTANT: This secret MUST BE created before deploying gateway and ingress-gateway + {{- range .Values.ingressGateway.secretVolumes }} + - name: {{ .name }} + secret: + secretName: {{ .secretName | quote 
}} + optional: true + {{- end }} +--- +{{- end}} diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/ingressgateway-service.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/ingressgateway-service.yaml new file mode 100644 index 000000000..ba18349b3 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/ingressgateway-service.yaml @@ -0,0 +1,60 @@ +{{- if eq .Values.citrixCPX true }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: citrix-ingressgateway + namespace: {{ .Release.Namespace }} +spec: + maxReplicas: 1 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: citrix-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 +--- +{{- end }} +apiVersion: v1 +kind: Service +metadata: + name: citrix-ingressgateway + namespace: {{ .Release.Namespace }} + annotations: + labels: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} +spec: +{{- if eq .Values.citrixCPX true }} + type: LoadBalancer +{{- end }} + selector: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} + ports: + - + name: http2 +{{- if eq .Values.citrixCPX true }} + nodePort: {{ .Values.ingressGateway.httpNodePort }} +{{- end }} + port: 80 + targetPort: 80 + - + name: https +{{- if eq .Values.citrixCPX true }} + nodePort: {{ .Values.ingressGateway.httpsNodePort }} +{{- end }} + port: 443 + targetPort: 443 +{{- $isCPX := .Values.citrixCPX }} +{{- range .Values.ingressGateway.tcpPort }} + - + name: {{ .name }} +{{- if eq $isCPX true }} + nodePort: {{ .nodePort }} +{{- end }} + port: {{ .port }} + targetPort: {{ .targetPort }} +{{- end }} +--- diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/metrics-exporter-service.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/metrics-exporter-service.yaml new file mode 100644 index 000000000..ad77e2374 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/metrics-exporter-service.yaml @@ -0,0 +1,17 @@ +{{- if eq .Values.metricExporter.required true }} +kind: Service +apiVersion: v1 +metadata: + name: exporter + annotations: + labels: + service-type: citrix-adc-monitor +spec: + selector: + app: {{ .Values.ingressGateway.label | default "citrix-ingressgateway" }} + ports: + - name: exporter-port + port: {{ .Values.metricExporter.port }} + targetPort: {{ .Values.metricExporter.port }} +--- +{{- end }} diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/secret.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/secret.yaml new file mode 100644 index 000000000..0201c116d --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if eq .Values.citrixCPX true }} +apiVersion: v1 +kind: Secret +metadata: + name: nslogin + namespace: {{ .Release.Namespace }} +type: Opaque +data: + username: "bnNyb290" + password: "bnNyb290" +{{- end }} diff --git a/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/values.yaml b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/values.yaml new file mode 100644 
index 000000000..ba929f218 --- /dev/null +++ b/charts/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway/1.2.100/values.yaml @@ -0,0 +1,50 @@ +# Default values for citrix-adc-istio-ingress-gateway +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +citrixCPX: false + +metricExporter: + required: true + image: quay.io/citrix/citrix-adc-metrics-exporter + version: 1.4.0 + port: 8888 + secure: "YES" + logLevel: ERROR + imagePullPolicy: IfNotPresent + +istioAdaptor: + image: quay.io/citrix/citrix-istio-adaptor + tag: 1.2.1 + imagePullPolicy: IfNotPresent + netscalerUrl: null + proxyType: router + secureConnect: true + vserverIP: + netProfile: + ADMIP: + ADMFingerPrint: + coeURL: + +istioPilot: + name: istio-pilot + namespace: istio-system + secureGrpcPort: 15011 + insecureGrpcPort: 15010 + SAN: spiffe://cluster.local/ns/istio-system/sa/istio-pilot-service-account + +ingressGateway: + image: quay.io/citrix/citrix-k8s-cpx-ingress + tag: 13.0-47.22 + imagePullPolicy: IfNotPresent + EULA: NO + mgmtHttpPort: 10080 + mgmtHttpsPort: 10443 + httpNodePort: 30180 + httpsNodePort: 31443 + lightWeightCPX: 1 + secretVolumes: + #licenseServerIP: this value will be taken from istioAdaptor.ADMIP + licenseServerPort: 27000 + label: citrix-ingressgateway + tcpPort: diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/Chart.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/Chart.yaml new file mode 100644 index 000000000..1d7bcc0e8 --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/Chart.yaml @@ -0,0 +1,18 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: citrix-cpx-with-ingress-controller +apiVersion: v1 +appVersion: 1.8.28 +description: A Helm chart for Citrix ADC CPX with Citrix ingress Controller running + as sidecar. +home: https://www.citrix.com +icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png +maintainers: +- email: priyanka.sharma@citrix.com + name: priyankash-citrix +- email: subash.dangol@citrix.com + name: subashd +name: citrix-cpx-with-ingress-controller +sources: +- https://github.com/citrix/citrix-k8s-ingress-controller +version: 1.8.2800 diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/README.md b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/README.md new file mode 100644 index 000000000..f3cee100f --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/README.md @@ -0,0 +1,234 @@ +# Citrix ADC CPX with Citrix Ingress Controller running as sidecar. + +In a [Kubernetes](https://kubernetes.io/) or [OpenShift](https://www.openshift.com) cluster, you can deploy [Citrix ADC CPX](https://docs.citrix.com/en-us/citrix-adc-cpx) with Citrix ingress controller as a [sidecar](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). The Citrix ADC CPX instance is used for load balancing the North-South traffic to the microservices in your cluster. And, the sidecar Citrix ingress controller configures the Citrix ADC CPX. 
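Like any Kubernetes ingress controller, the sidecar Citrix ingress controller watches `Ingress` resources and renders them as Citrix ADC CPX configuration. A minimal sketch of such an `Ingress` follows (hypothetical host, backend service, and ingress class name; on clusters older than Kubernetes 1.19 the `networking.k8s.io/v1beta1` form of the resource applies instead):

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
  annotations:
    # Match the ingressClass[0] value passed to the Helm chart, if one is set
    kubernetes.io/ingress.class: citrix
spec:
  rules:
  - host: web.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-service
            port:
              number: 80
```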
+ +## TL;DR; + +### For Kubernetes + ``` + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install cpx citrix/citrix-cpx-with-ingress-controller --set license.accept=yes + ``` + +### For OpenShift + + ``` + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + + helm install cpx citrix/citrix-cpx-with-ingress-controller --set license.accept=yes,openshift=true + ``` + +> **Important:** +> +> The "license.accept" is a mandatory argument and should be set to "yes" to accept the terms of the Citrix license. + + +## Introduction +This Helm chart deploys a Citrix ADC CPX with Citrix ingress controller as a sidecar in the [Kubernetes](https://kubernetes.io/) or in the [Openshift](https://www.openshift.com) cluster using the [Helm](https://helm.sh/) package manager. + +### Prerequisites + +- The [Kubernetes](https://kubernetes.io/) version is 1.6 or later if using Kubernetes environment. +- The [Openshift](https://www.openshift.com) version 3.11.x or later if using OpenShift platform. +- The [Helm](https://helm.sh/) version 3.x or later. You can follow instruction given [here](https://github.com/citrix/citrix-helm-charts/blob/master/Helm_Installation_version_3.md) to install the same. +- You have installed [Prometheus Operator](https://github.com/coreos/prometheus-operator), if you want to view the metrics of the Citrix ADC CPX collected by the [metrics exporter](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/metrics-visualizer#visualization-of-metrics). +- Registration of Citrix ADC CPX in ADM: You may want to register your CPX in ADM for licensing or to obtain [servicegraph](https://docs.citrix.com/en-us/citrix-application-delivery-management-service/application-analytics-and-management/service-graph.html). For this you will have to create a Kubernetes secret using ADM credentials and provide it while install the chart. Create a Kubernetes secret for the user name and password using the following command: + + ``` + kubectl create secret generic admlogin --from-literal=username= --from-literal=password= -n citrix-system + ``` + +## Installing the Chart +Add the Citrix Ingress Controller helm chart repository using command: + + ``` + helm repo add citrix https://citrix.github.io/citrix-helm-charts/ + ``` + +### For Kubernetes: +#### 1. Citrix ADC CPX with Citrix Ingress Controller running as side car. +To install the chart with the release name ``` my-release```: + + ``` + helm install my-release citrix/citrix-cpx-with-ingress-controller --set license.accept=yes,ingressClass[0]= + ``` + +> **Note:** +> +> By default the chart installs the recommended [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/) roles and role bindings. + +The command deploys Citrix ADC CPX with Citrix ingress controller as a sidecar on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the mandatory and optional parameters that you can configure during installation. + +#### 2. Citrix ADC CPX with Citrix Ingress Controller and Exporter running as side car. +[Metrics exporter](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/metrics-visualizer#visualization-of-metrics) can be deployed as sidecar to the Citrix ADC CPX and collects metrics from the Citrix ADC CPX instance. You can then [visualize these metrics](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/metrics/promotheus-grafana/) using Prometheus Operator and Grafana. 
+> **Note:** +> +> Ensure that you have installed [Prometheus Operator](https://github.com/coreos/prometheus-operator). + +Use the following command for this: + ``` + helm install my-release citrix/citrix-cpx-with-ingress-controller --set license.accept=yes,ingressClass[0]=,exporter.required=true + ``` + +### For OpenShift: +Add the service account named "cpx-ingress-k8s-role" to the privileged Security Context Constraints of OpenShift: + + ``` + oc adm policy add-scc-to-user privileged system:serviceaccount::cpx-ingress-k8s-role + ``` + +#### 1. Citrix ADC CPX with Citrix Ingress Controller running as side car. +To install the chart with the release name, `my-release`, use the following command: + ``` + helm install my-release citrix/citrix-cpx-with-ingress-controller --set license.accept=yes,openshift=true + ``` + +#### 2. Citrix ADC CPX with Citrix Ingress Controller and Exporter running as side car. +[Metrics exporter](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/metrics-visualizer#visualization-of-metrics) can be deployed as sidecar to the Citrix ADC CPX and collects metrics from the Citrix ADC CPX instance. You can then [visualize these metrics](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/metrics/promotheus-grafana/) using Prometheus Operator and Grafana. +> **Note:** +> +> Ensure that you have installed [Prometheus Operator](https://github.com/coreos/prometheus-operator). + +Use the following command for this: + ``` + helm install my-release citrix/citrix-k8s-ingress-controller --set license.accept=yes,openshift=true,exporter.required=true + ``` + +### Installed components + +The following components are installed: + +- [Citrix ADC CPX](https://docs.citrix.com/en-us/citrix-adc-cpx/netscaler-cpx.html) +- [Citrix ingress controller](https://github.com/citrix/citrix-k8s-ingress-controller) (if enabled) +- [Exporter](https://github.com/citrix/citrix-adc-metrics-exporter) (if enabled) + + +## CRDs configuration + +CRDs gets installed/upgraded automatically when we install/upgrade Citrix ADC CPX with Citrix ingress controller using Helm. If you do not want to install CRDs, then set the option `crds.install` to `false`. By default, CRDs too get deleted if you uninstall through Helm. This means, even the CustomResource objects created by the customer will get deleted. If you want to avoid this data loss set `crds.retainOnDelete` to `true`. + +> **Note:** +> Installing again may fail due to the presence of CRDs. Make sure that you back up all CustomResource objects and clean up CRDs before re-installing Citrix ADC CPX with Citrix ingress controller. + +There are a few examples of how to use these CRDs, which are placed in the folder: [Example-CRDs](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds). Refer to them and install as needed, using the following command: +```kubectl create -f ``` + +### Details of the supported CRDs: + +#### authpolicies CRD: + +Authentication policies are used to enforce access restrictions to resources hosted by an application or an API server. + +Citrix provides a Kubernetes CustomResourceDefinitions (CRDs) called the [Auth CRD](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/crd/auth) that you can use with the Citrix ingress controller to define authentication policies on the ingress Citrix ADC. 
+ +Example file: [auth_example.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/auth_example.yaml) + +#### continuousdeployments CRD for canary: + +Canary release is a technique to reduce the risk of introducing a new software version in production by first rolling out the change to a small subset of users. After user validation, the application is rolled out to the larger set of users. Citrix ADC-Integrated [Canary Deployment solution](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/crd/canary) stitches together all components of continuous delivery (CD) and makes canary deployment easier for the application developers. + +#### httproutes and listeners CRDs for contentrouting: + +[Content Routing (CR)](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/crd/contentrouting) is the execution of defined rules that determine the placement and configuration of network traffic between users and web applications, based on the content being sent. For example, a pattern in the URL or header fields of the request. + +Example files: [HTTPRoute_crd.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/HTTPRoute_crd.yaml), [Listener_crd.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/Listener_crd.yaml) + +#### ratelimits CRD: + +In a Kubernetes deployment, you can [rate limit the requests](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/crd/ratelimit) to the resources on the back end server or services using rate limiting feature provided by the ingress Citrix ADC. + +Example files: [ratelimit-example1.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/ratelimit-example1.yaml), [ratelimit-example2.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/ratelimit-example2.yaml) + +#### vips CRD: + +Citrix provides a CustomResourceDefinitions (CRD) called [VIP](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/crd/vip) for asynchronous communication between the IPAM controller and Citrix ingress controller. + +The IPAM controller is provided by Citrix for IP address management. It allocates IP address to the service from a defined IP address range. The Citrix ingress controller configures the IP address allocated to the service as virtual IP (VIP) in Citrix ADX VPX. And, the service is exposed using the IP address. + +When a new service is created, the Citrix ingress controller creates a CRD object for the service with an empty IP address field. The IPAM Controller listens to addition, deletion, or modification of the CRD and updates it with an IP address to the CRD. Once the CRD object is updated, the Citrix ingress controller automatically configures Citrix ADC-specfic configuration in the tier-1 Citrix ADC VPX. + +#### rewritepolicies CRD: + +In kubernetes environment, to deploy specific layer 7 policies to handle scenarios such as, redirecting HTTP traffic to a specific URL, blocking a set of IP addresses to mitigate DDoS attacks, imposing HTTP to HTTPS and so on, requires you to add appropriate libraries within the microservices and manually configure the policies. Instead, you can use the [Rewrite and Responder features](https://github.com/citrix/citrix-k8s-ingress-controller/blob/master/crd/rewrite-responder-policies-deployment.yaml) provided by the Ingress Citrix ADC device to deploy these policies. 
+
+Example files: [target-url-rewrite.yaml](https://github.com/citrix/citrix-helm-charts/tree/master/example-crds/target-url-rewrite.yaml)
+
+
+## Configuration
+The following table lists the configurable parameters of the Citrix ADC CPX with Citrix ingress controller as sidecar chart and their default values.
+
+| Parameters | Mandatory or Optional | Default value | Description |
+| ---------- | --------------------- | ------------- | ----------- |
+| license.accept | Mandatory | no | Set `yes` to accept the Citrix ingress controller end user license agreement. |
+| image | Mandatory | `quay.io/citrix/citrix-k8s-cpx-ingress:13.0-58.30` | The Citrix ADC CPX image. |
+| pullPolicy | Mandatory | IfNotPresent | The Citrix ADC CPX image pull policy. |
+| cic.image | Mandatory | `quay.io/citrix/citrix-k8s-ingress-controller:1.8.28` | The Citrix ingress controller image. |
+| cic.pullPolicy | Mandatory | IfNotPresent | The Citrix ingress controller image pull policy. |
+| cic.required | Mandatory | true | CIC to be run as sidecar with Citrix ADC CPX. |
+| logLevel | Optional | DEBUG | The log level to control the logs generated by CIC. The supported log levels are: CRITICAL, ERROR, WARNING, INFO, DEBUG and TRACE. For more information, see [Logging](https://github.com/citrix/citrix-k8s-ingress-controller/blob/master/docs/configure/log-levels.md). |
+| defaultSSLCert | Optional | N/A | Default SSL certificate that needs to be used as a non-SNI certificate in Citrix ADC. |
+| http2ServerSide | Optional | OFF | Enables HTTP2 for Citrix ADC service group configurations. |
+| logProxy | Optional | N/A | Elasticsearch, Kafka, or Zipkin endpoint for the Citrix observability exporter. |
+| nsNamespace | Optional | k8s | The prefix for the resources on the Citrix ADC CPX. |
+| ingressClass | Optional | N/A | If multiple ingress load balancers are used to load balance different ingress resources, you can use this parameter to specify the ingress class for which the Citrix ingress controller configures the associated Citrix ADC. |
+| openshift | Optional | false | Set this argument to true if an OpenShift environment is being used. |
+| nodeSelector.key | Optional | N/A | Node label key to be used for the nodeSelector option in the CPX-CIC deployment. |
+| nodeSelector.value | Optional | N/A | Node label value to be used for the nodeSelector option in the CPX-CIC deployment. |
+| ADMSettings.licenseServerIP | Optional | N/A | Provide the Citrix Application Delivery Management (ADM) IP address to license Citrix ADC CPX. For more information, see [Licensing](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/licensing/). |
+| ADMSettings.licenseServerPort | Optional | 27000 | Citrix ADM port if a non-default port is used. |
+| ADMSettings.ADMIP | Optional | N/A | Citrix Application Delivery Management (ADM) IP address. |
+| ADMSettings.ADMFingerPrint | Optional | N/A | Citrix Application Delivery Management (ADM) fingerprint. For more information, see [this](https://docs.citrix.com/en-us/citrix-application-delivery-management-service/application-analytics-and-management/service-graph.html). |
+| ADMSettings.loginSecret | Optional | N/A | The secret key to log on to the ADM. For information on how to create the secret keys, see [Prerequisites](#prerequistes). |
+| ADMSettings.bandWidthLicense | Optional | False | Set to true if you want to use bandwidth based licensing for Citrix ADC CPX. |
+| ADMSettings.bandWidth | Optional | N/A | Desired bandwidth capacity to be set for Citrix ADC CPX in Mbps. |
+| ADMSettings.vCPULicense | Optional | False | Set to true if you want to use vCPU based licensing for Citrix ADC CPX. |
+| ADMSettings.cpxCores | Optional | 1 | Desired number of vCPUs to be set for Citrix ADC CPX. |
+| exporter.required | Optional | false | Set to true if you want to run the [Exporter for Citrix ADC Stats](https://github.com/citrix/citrix-adc-metrics-exporter) along with the Citrix ingress controller to pull metrics for the Citrix ADC CPX. |
+| exporter.image | Optional | `quay.io/citrix/citrix-adc-metrics-exporter:1.4.5` | The Exporter for Citrix ADC Stats image. |
+| exporter.pullPolicy | Optional | IfNotPresent | The Exporter for Citrix ADC Stats image pull policy. |
+| exporter.ports.containerPort | Optional | 8888 | The Exporter for Citrix ADC Stats container port. |
+| coeConfig.required | Mandatory | false | Set this to true if you want to configure Citrix ADC to send metrics and transaction records to COE. |
+| coeConfig.distributedTracing.enable | Optional | false | Set this value to true to enable OpenTracing in Citrix ADC. |
+| coeConfig.distributedTracing.samplingrate | Optional | 100 | Specifies the OpenTracing sampling rate in percentage. |
+| coeConfig.endpoint.server | Optional | N/A | Set this value as the IP address or DNS address of the analytics server. |
+| coeConfig.timeseries.port | Optional | 5563 | Specify the port used to expose the COE service for the timeseries endpoint. |
+| coeConfig.timeseries.metrics.enable | Optional | false | Set this value to true to enable sending metrics from Citrix ADC. |
+| coeConfig.timeseries.metrics.mode | Optional | avro | Specifies the mode of the metric endpoint. |
+| coeConfig.timeseries.auditlogs.enable | Optional | false | Set this value to true to export audit log data from Citrix ADC. |
+| coeConfig.timeseries.events.enable | Optional | false | Set this value to true to export events from the Citrix ADC. |
+| coeConfig.transactions.enable | Optional | false | Set this value to true to export transactions from Citrix ADC. |
+| coeConfig.transactions.port | Optional | 5557 | Specify the port used to expose the COE service for the transaction endpoint. |
+| crds.install | Optional | true | Unset this argument if you don't want to install the CustomResourceDefinitions consumed by CIC. |
+| crds.retainOnDelete | Optional | false | Set this argument if you want to retain CustomResourceDefinitions even after uninstalling CIC. This avoids data loss of CustomResource objects created before uninstallation. |
+
+> **Note:**
+>
+> If Citrix ADM related information is not provided during installation, Citrix ADC CPX will come up with the default license.
+
+Alternatively, you can define a YAML file with the values for the parameters and pass the values while installing the chart.
+
+For example:
+ ```
+ helm install my-release citrix/citrix-cpx-with-ingress-controller -f values.yaml
+ ```
+
+> **Tip:**
+>
+> The [values.yaml](https://github.com/citrix/citrix-helm-charts/blob/master/citrix-cpx-with-ingress-controller/values.yaml) contains the default values of the parameters.
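+
+As a concrete illustration, a small override file for the command above might look like the following sketch. The parameter names are taken from the table above and from the chart's values.yaml; the file name, ingress class name, and COE endpoint are placeholder values.
+
+```
+# my-values.yaml -- illustrative overrides only
+license:
+  accept: "yes"            # accept the EULA
+ingressClass:
+  - citrix-cpx             # placeholder ingress class name
+exporter:
+  required: true           # run the metrics exporter sidecar
+coeConfig:
+  required: true
+  endpoint:
+    server: coe.monitoring.svc.cluster.local   # placeholder COE service address
+  timeseries:
+    metrics:
+      enable: true
+```
+
+Installing with `helm install my-release citrix/citrix-cpx-with-ingress-controller -f my-values.yaml` would then apply these overrides on top of the chart defaults.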
+ +## Uninstalling the Chart +To uninstall/delete the ```my-release``` deployment: + ``` + helm delete my-release + ``` + +## Related documentation + +- [Citrix ADC CPX Documentation](https://docs.citrix.com/en-us/citrix-adc-cpx/12-1/cpx-architecture-and-traffic-flow.html) +- [Citrix ingress controller Documentation](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/) +- [Citrix ingress controller GitHub](https://github.com/citrix/citrix-k8s-ingress-controller) diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/app-readme.md b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/app-readme.md new file mode 100644 index 000000000..ef45a3d90 --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/app-readme.md @@ -0,0 +1,5 @@ +# Citrix ADC CPX with Citrix Ingress Controller running as sidecar. + +In a [Kubernetes](https://kubernetes.io/) or [OpenShift](https://www.openshift.com) cluster, you can deploy [Citrix ADC CPX](https://docs.citrix.com/en-us/citrix-adc-cpx) with Citrix ingress controller as a [sidecar](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/). The Citrix ADC CPX instance is used for load balancing the North-South traffic to the microservices in your cluster. And, the sidecar Citrix ingress controller configures the Citrix ADC CPX. + +This Chart bootstraps deployment of Citrix ADC CPX with Citrix Ingress Controller as sidecar. diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/questions.yml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/questions.yml new file mode 100644 index 000000000..0c8714413 --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/questions.yml @@ -0,0 +1,211 @@ +questions: +- variable: license.accept + required: true + default: "no" + type: enum + description: "Set to yes to accept the terms and conditions of the Citrix license." 
+ label: Accept License + group: "Deployment Settings" + options: + - "yes" + - "no" +- variable: openshift + default: false + type: boolean + description: "openshift is set to true if charts are being deployed in OpenShift environment" + label: Openshift flag + group: "Deployment Settings" +- variable: nsNamespace + type: string + description: "Prefix for the resources on Citrix ADC" + label: Resource Prefix + group: "Deployment Settings" +- variable: ingressClass[0] + type: string + description: "ingressClass is the name of the Ingress Class" + label: Ingress Class + group: "Deployment Settings" +- variable: logLevel + default: "DEBUG" + type: enum + options: + - "TRACE" + - "DEBUG" + - "INFO" + - "WARNING" + - "ERROR" + description: "logLevel of Citrix Ingress Controller pod" + label: LogLevel + group: "Deployment Settings" +- variable: defaultSSLCert + type: string + description: "Secret containing the default ceritifcate for SSL vservers" + label: Default SSLCert + group: "ADC Settings" +- variable: logProxy + type: string + description: "Elasticsearch or Kafka or Zipkin endpoint for Citrix observability exporte" + label: LogProxy + group: "Deployment Settings" +- variable: http2ServerSide + default: "OFF" + type: enum + options: + - "ON" + - "OFF" + description: "Set to ON to enable HTTP2 for Citrix ADC service group configurations" + label: HTTP2 on ADC + group: "ADC Settings" +- variable: nodeSelector.key + type: string + label: NodeSelector Key + group: "Deployment Settings" +- variable: nodeSelector.value + type: string + label: NodeSelector Value + group: "Deployment Settings" + + +- variable: ADMSettings.licenseServerIP + type: string + label: ADM LicenseServerIP + group: "ADM Settings" +- variable: ADMSettings.licenseServerPort + default: 27000 + type: int + label: ADM LicenseServerPort + group: "ADM Settings" +- variable: ADMSettings.ADMIP + type: string + label: ADM IP + group: "ADM Settings" +- variable: ADMSettings.ADMFingerPrint + type: string + label: ADM FingerPrint + group: "ADM Settings" +- variable: ADMSettings.loginSecret + type: string + label: ADM Login Secret + group: "ADM Settings" +- variable: ADMSettings.bandWidthLicense + type: boolean + label: CPX Bandwidth License + group: "ADM Settings" +- variable: ADMSettings.bandWidth + type: int + label: CPX Bandwidth + group: "ADM Settings" +- variable: ADMSettings.vCPULicense + type: boolean + label: CPX vCPU License + group: "ADM Settings" +- variable: ADMSettings.cpxCores + type: int + label: CPX Cores + group: "ADM Settings" +- variable: cic.pullpolicy + default: "IfNotPresent" + type: enum + label: CIC Image Pullpolicy + group: "CIC/CPX Image Settings" + options: + - "Always" + - "IfNotPresent" + - "Never" +- variable: pullpolicy + default: "IfNotPresent" + type: enum + label: CPX Image Pullpolicy + group: "CIC/CPX Image Settings" + options: + - "Always" + - "IfNotPresent" + - "Never" +- variable: cic.image + default: "quay.io/citrix/citrix-k8s-ingress-controller:1.8.28" + type: string + label: CIC Image + group: "CIC/CPX Image Settings" +- variable: image + type: string + default: "quay.io/citrix/citrix-k8s-cpx-ingress:13.0-58.30" + label: CPX Image + group: "CIC/CPX Image Settings" +- variable: exporter.image + default: "quay.io/citrix/citrix-adc-metrics-exporter:1.4.5" + type: string + description: "Exporter Image to be used" + label: Exporter Image + group: "Exporter Settings" +- variable: exporter.pullPolicy + default: "IfNotPresent" + type: string + description: "Exporter Image pull policy" + label: 
Exporter Image PullPolicy + group: "Exporter Settings" +- variable: exporter.ports.containerPort + default: 8888 + type: int + label: Exporter ContainerPort + group: "Exporter Settings" +- variable: coeConfig.distributedTracing.enable + default: false + type: boolean + label: Enable distributedTracing + group: "COE Settings" +- variable: coeConfig.distributedTracing.samplingrate + default: 100 + type: int + label: COE Sampling Rate + group: "COE Settings" +- variable: coeConfig.endpoint.server + type: string + label: COE Endpoint Server + group: "COE Settings" +- variable: coeConfig.timeseries.port + default: 5563 + type: int + label: COE timeseries port + group: "COE Settings" +- variable: coeConfig.timeseries.metrics.enable + default: false + type: boolean + label: Enable timeseries metrics + group: "COE Settings" +- variable: coeConfig.timeseries.metrics.mode + default: 'avro' + type: string + label: COE timeseries metrics Mode + group: "COE Settings" +- variable: coeConfig.timeseries.auditlogs.enable + default: false + type: string + label: Enable timeseries auditlogs + group: "COE Settings" +- variable: coeConfig.timeseries.events.enable + default: false + type: string + label: Enable timeseries events + group: "COE Settings" +- variable: coeConfig.transactions.enable + default: false + type: string + label: Enable transactions + group: "COE Settings" +- variable: coeConfig.transactions.port + default: 5557 + type: int + label: COE transactions port + group: "COE Settings" +- variable: crds.install + default: true + type: boolean + description: "If set to true the charts will install CustomResourceDefinitions which are consumed by CIC." + label: CRD flag + group: "Deployment Settings" +- variable: crds.retainOnDelete + default: false + type: boolean + description: "Set this argument to true if you want to retain CustomResourceDefinitions even after uninstalling CIC. This will avoid data-loss of Custom Resource Objects created before uninstallation." + label: CRD retainOnDelete flag + group: "Deployment Settings" diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/NOTES.txt b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/NOTES.txt new file mode 100644 index 000000000..bccfdf69a --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/NOTES.txt @@ -0,0 +1,14 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
+ + +To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get {{ .Release.Name }} + + +To delete : + helm delete {{ .Release.Name }} + diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/_helpers.tpl b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/_helpers.tpl new file mode 100644 index 000000000..5fd1f1d61 --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/_helpers.tpl @@ -0,0 +1,11 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Analytics Server IP or DNS +*/}} +{{- define "analytics.server" -}} +{{- if .Values.coeConfig.endpoint.server -}} +{{- printf .Values.coeConfig.endpoint.server -}} +{{- else -}} +{{- printf "coe.%s.svc.cluster.local" .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/cic_crds.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/cic_crds.yaml new file mode 100644 index 000000000..2ca841373 --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/cic_crds.yaml @@ -0,0 +1,1009 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rewritepolicies.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + group: citrix.com + version: v1 + names: + kind: rewritepolicy + plural: rewritepolicies + singular: rewritepolicy + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + validation: + openAPIV3Schema: + properties: + spec: + properties: + rewrite-policies: + type: array + items: + properties: + servicenames: + description: 'Name of the services that needs to be binded to rewrite policy.' + type: array + items: + type: string + maxLength: 127 + goto-priority-expression: + description: 'Expression or other value specifying the next policy to be + evaluated if the current policy evaluates to TRUE. + Specify one of the following values: + * NEXT - Evaluate the policy with the next higher priority number. + * END - End policy evaluation. + Default value of goto-priority-expression: END' + type: string + maxLength: 1499 + logpackets: + description: 'Adds an audit message action. + The action specifies whether to log the message, and to which log.' + properties: + logexpression: + description: 'Default-syntax expression that defines the format and content of the log message.' + type: string + maxLength: 7991 + loglevel: + description: 'Audit log level, which specifies the severity level of the log message being generated.' + type: string + enum: ["EMERGENCY", "ALERT", "CRITICAL", "ERROR", "WARNING", "NOTICE", "INFORMATIONAL", "DEBUG"] + required: [logexpression, loglevel] + rewrite-policy: + properties: + rewrite-criteria: + description: 'Expression against which traffic is evaluated.' + type: string + maxLength: 1299 + default-action: + description: 'Action to perform if the result of policy evaluation is undefined (UNDEF). + An UNDEF event indicates an internal error condition.' 
+ type: string + maxLength: 77 + enum: ['NOREWRITE', 'RESET', 'DROP'] + operation: + description: 'Type of user-defined rewrite action.' + type: string + enum: ["noop", "delete", "insert_http_header", "delete_http_header", + "corrupt_http_header", "insert_before", "insert_after", "replace", + "replace_http_res", "delete_all", "replace_all", "insert_before_all", + "insert_after_all", "clientless_vpn_encode", "clientless_vpn_encode_all", + "clientless_vpn_decode", "clientless_vpn_decode_all", "insert_sip_header", + "delete_sip_header", "corrupt_sip_header", "replace_sip_res", "replace_diameter_header_field", + "replace_dns_header_field", "replace_dns_answer_section"] + target: + description: 'Default syntax expression that specifies which part of the request or response to rewrite.' + type: string + maxLength: 1229 + modify-expression: + description: 'Default syntax expression that specifies the content to insert into the request + or response at the specified location, or that replaces the specified string.' + type: string + maxLength: 7991 + multiple-occurence-modify: + description: 'Search facility that is used to match multiple strings in the request or response.' + type: string + maxLength: 171 + additional-multiple-occurence-modify: + description: 'Specify additional criteria to refine the results of the search. + Always starts with the "extend(m,n)" operation, where "m" specifies number of bytes to the left of selected data + and "n" specifies number of bytes to the right of selected data. + You can use refineSearch only on body expressions, and only when rewrite-criteria is any one of this: + INSERT_BEFORE_ALL, INSERT_AFTER_ALL, REPLACE_ALL, and DELETE_ALL.' + type: string + maxLength: 1299 + direction: + description: 'Bind point to which to bind the policy.' + type: string + enum: ["REQUEST","RESPONSE"] + comment: + description: 'Any comments to preserve information about this rewrite policy.' + type: string + maxLength: 255 + required: [rewrite-criteria, operation, target, direction] + required: [servicenames, rewrite-policy] + + responder-policies: + type: array + items: + properties: + servicenames: + description: 'Name of the services that needs to be binded to responder policy.' + type: array + items: + type: string + maxLength: 127 + goto-priority-expression: + description: 'Expression or other value specifying the next policy to be + evaluated if the current policy evaluates to TRUE. + Specify one of the following values: + * NEXT - Evaluate the policy with the next higher priority number. + * END - End policy evaluation. + Default value of goto-priority-expression: END' + type: string + maxLength: 1499 + logpackets: + description: 'Adds an audit message action. + The action specifies whether to log the message, and to which log.' + properties: + logexpression: + description: 'Default-syntax expression that defines the format and content of the log message.' + type: string + maxLength: 7991 + loglevel: + description: 'Audit log level, which specifies the severity level of the log message being generated.' + type: string + enum: ["EMERGENCY", "ALERT", "CRITICAL", "ERROR", "WARNING", + "NOTICE", "INFORMATIONAL", "DEBUG"] + required: [logexpression, loglevel] + responder-policy: + properties: + redirect: + description: 'Use this option when you want to Redirect the request when request matches to policy.' + properties: + url: + description: 'URL on which you want to redirect the request.' 
+ type: string + maxLength: 7991 + redirect-status-code: + description: 'HTTP response status code, for example 200, 302, 404, etc.' + type: integer + minimum: 100 + maximum: 599 + redirect-reason: + description: 'Expression specifying the reason for redirecting the request.' + type: string + maxLength: 7991 + required: [url] + respondwith: + description: 'Use this parameter when you want to respond to the request when request matches to policy.' + properties: + http-payload-string: + description: 'Expression that you want to sent as response to the request.' + type: string + maxLength: 7991 + required: [http-payload-string] + noop: + description: 'Use this option when you want to send the request to the protected server instead of + responding to it when request matches to policy.' + properties: + target: + description: 'Default syntax expression that specifies to perform noop operation on' + type: string + maxLength: 1229 + reset: + description: 'Use this option when you want to Reset the client connection by closing it when request matches to policy.' + properties: + drop: + description: 'Use this option when you want to drop the request without sending a response to the user when request matches to policy.' + properties: + respond-criteria: + description: 'Default syntax expression that the policy uses to determine whether to respond to the specified request.' + type: string + maxLength: 1299 + default-action: + description: 'Action to perform if the result of policy evaluation is undefined (UNDEF). + An UNDEF event indicates an internal error condition.' + type: string + maxLength: 77 + enum: ['NOOP', 'RESET', 'DROP'] + comment: + description: 'Any comments to preserve information about this responder policy.' + type: string + maxLength: 255 + required: [respond-criteria] + oneOf: [required: [redirect], required: [respondwith], required: [noop], required: [reset], required: [drop]] + required: [servicenames, responder-policy] + + dataset: + type: array + items: + properties: + name: + description: 'Name of the dataset.' + type: string + maxLength: 32 + type: + description: 'Type of value to bind to the dataset.' + type: string + enum: ["ipv4", "number", "ipv6", "ulong", "double", "mac"] + comment: + description: 'Any comments to preserve information about this dataset.' + type: string + maxLength: 255 + values: + description: 'Value of the specified type that is associated with this dataset.' + type: array + required: [name, type, values] + + patset: + type: array + items: + properties: + name: + description: 'Name of the Patset.' + type: string + maxLength: 32 + comment: + description: 'Any comments to preserve information about this patset.' + type: string + maxLength: 255 + values: + description: 'String of characters that constitutes a pattern and is associated with this patset.' + type: array + required: [name, values] + + stringmap: + type: array + items: + properties: + name: + description: 'Name of the Stringmap.' + type: string + maxLength: 32 + comment: + description: 'Any comments to preserve information about this stringmap.' + type: string + maxLength: 255 + values: + description: 'List of (key,value) pairs to be bound to this string map.' + type: array + items: + properties: + key: + description: 'Character string constituting the key to be bound to this string map.' + type: string + maxLength: 2047 + value: + description: 'Character string constituting the value associated with the key.' 
+ type: string + maxLength: 2047 + required: [name, values] + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ratelimits.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + group: citrix.com + version: v1beta1 + names: + kind: ratelimit + plural: ratelimits + singular: ratelimit + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + validation: + openAPIV3Schema: + properties: + spec: + properties: + servicenames: + description: 'Name of the services to which the ratelimit policies are applied.' + type: array + items: + type: string + maxLength: 127 + selector_keys: + description: 'Traffic match criteria to which apply above rate-limit/throttling. All keys are applied as AND condition. If no keys are specified, rate-limit applies at service level' + properties: + basic: + description: "Basic traffic stream selection criteria to which to apply the ratelimit" + properties: + path: + type: array + description: "api resource path prefix match. e.g. /api/v1/products" + items: + type: string + method: + type: array + items: + type: string + enum: ['GET', 'PUT', 'POST','DELETE'] + header_name: + description: "HTTP header that identifies the unique API client for e.g. X-apikey" + type: string + per_client_ip: + description: "Setting this applies the throttling limit to each unique Client IP address accessing the API resource" + type: boolean + req_threshold: + description: 'Max requests per timeslice units to be allowed' + type: integer + timeslice: + description: 'Timeslice in miliseconds in multiple of 10. Defaults to 1000 miliseconds' + type: integer + limittype: + description: "Burst mode or smooth. 
Defaults to burst mode if the limittype is not specified" + type: string + enum: ['BURSTY','SMOOTH'] + throttle_action: + type: string + enum: ['DROP', 'RESET','REDIRECT', 'RESPOND'] + description: "Drop will drop the requests exceeding limits, RESET will reset the client connection, Redirect will redirect to specified URL, respond will respond with 429 'Exceeded allowed rate of requests'" + redirect_url: + type: string + description: "Redirect-URL" + required: [servicenames, req_threshold] +--- +#Sample CRD instance + +#apiVersion: citrix.com/v1 +#description: VIP for apache service +#kind: vip +#metadata: +# name: service-apache +# namespace: default +#spec: +# description: VIP for the apache Service +# ipaddress: 10.99.98.90 +# kind: service +# name: apache + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: vips.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + group: citrix.com + version: v1 + names: + kind: vip + plural: vips + singular: vip + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + additionalPrinterColumns: + - JSONPath: .spec.ipaddress + name: VIP + type: string + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + validation: + openAPIV3Schema: + properties: + spec: + properties: + ipaddress: + type: string + name: + type: string + kind: + type: string + enum: ["service", "ingress"] + description: + type: string +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: authpolicies.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + group: citrix.com + version: v1beta1 + names: + kind: authpolicy + plural: authpolicies + singular: authpolicy + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + validation: + openAPIV3Schema: + properties: + spec: + properties: + servicenames: + description: 'Name of the services that needs to be binded to rewrite policy.' 
+ type: array + items: + type: string + maxLength: 127 + auth_providers: + description: 'Auth Config for required auth providers, one or more of these can be created' + type: array + items: + description: " create config for a single auth provider of a particular type" + properties: + name: + description: 'Name for this provider, has to be unique, referenced by auth policies' + type: string + + oauth: + description: 'Auth provided by external oAuth provider' + properties: + issuer: + description: 'Identity of the server whose tokens are to be accepted' + type: string + jwks_uri: + description: 'URL of the endpoint that contains JWKs (Json Web Key) for JWT (Json Web Token) verification' + type: string + audience: + description: 'Audience for which token sent by Authorization server is applicable' + type: array + items: + type: string + token_in_hdr: + description: 'custom header name where token is present, default is Authorization header' + type: array + items: + type: string + token_in_param: + description: 'query parameter name where token is present' + type: array + items: + type: string + + basic_local_db: + description: 'Basic HTTP authentication, user data in local DB' + + required: + - name + + auth_policies: + description: "Auth policies" + type: array + items: + description: "Auth policy" + properties: + resource: + description: " endpoint/resource selection criteria" + properties: + path: + description: "api resource path e.g. /products. " + type: array + items: + type: string + method: + type: array + items: + type: string + enum: ['GET', 'PUT', 'POST','DELETE'] + required: + - path + provider: + description: "name of the auth provider for the policy, empty if no authentication required" + type: array + items: + type: string + required: + - resource + - provider + + required: + - servicenames + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: listeners.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +status: +spec: + group: citrix.com + version: v1alpha1 + names: + kind: Listener + plural: listeners + singular: listener + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + validation: + openAPIV3Schema: + required: [spec] + properties: + spec: + type: object + required: [protocol] + properties: + protocol: + type: string + enum: ["https", "http"] + description: "Protocol for this listener" + vip: + type: string + description: "Endpoint IP address, Optional for CPX, required for Tier-1 deployments" + port: + type: integer + minimum: 1 + maximum: 65535 + certificates: + type: array + description: "certificates attached to the endpoints - Not applicable for HTTP" + minItems: 1 + items: + type: object + properties: + preconfigured: + type: string + description: "Preconfigured Certificate name on ADC " + secret: + type: object + description: "Kuberentes secret object" + required: [name] + properties: + name: + type: string + description: "name of the Kubernetes Secret object where Cert is located" + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + namespace: + type: string + description: "Namespace of the kubernetes secret object; Default is same namespace where the Listener object is located" + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + default: + type: 
boolean + description: "Only one of the certificate can be marked as default which will be presented if none of the cert matches with the hostname" + routes: + type: array + description: "List of route objects attached to the listener" + minItems: 1 + items: + type: object + properties: + name: + type: string + description: "Name of the HTTPRoute object" + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + namespace: + type: string + description: "Namespace of the HTTPRoute object" + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + labelSelector: + description: "Labels key value pair, if the route carries the same labels, it is automatically attached" + type: object + additionalProperties: + type: string + oneOf: + - required: [name, namespace] + - required: [labelSelector] + defaultAction: + type: object + description: "Default action for the listener: One of Backend or Redirect" + properties: + backend: + type: object + oneOf: + - required: [kube] + properties: + kube: + type: object + required: [service, port] + properties: + service: + description: "Name of the backend service" + type: string + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + port: + description: "Service port" + type: integer + minimum: 1 + maximum: 65535 + namespace: + description: "Service namespace" + type: string + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + backendConfig: + description: "General backend service options" + properties: + secure_backend: + description: "Use Secure communications to the backends" + type: boolean + lbConfig: + description: "Citrix ADC LB vserver configurations for the backend. Refer: https://developer-docs.citrix.com/projects/netscaler-nitro-api/en/12.0/configuration/load-balancing/lbvserver/lbvserver/ for all configurations" + type: object + additionalProperties: + type: string + servicegroupConfig: + description: "Citrix ADC service group configurations for the backend; Refer: https://developer-docs.citrix.com/projects/netscaler-nitro-api/en/12.0/configuration/basic/servicegroup/servicegroup/ for all configurations" + type: object + additionalProperties: + type: string + redirect: + type: object + oneOf: + - required: [targetExpression] + - required: [hostRedirect] + - required: [httpsRedirect] + properties: + httpsRedirect: + description: "Change the scheme from http to https keeping URL intact" + type: boolean + hostRedirect: + description: "Host name specified is used for redirection with URL intact" + type: string + targetExpression: + description: "A target can be specified using Citrix ADC policy expression" + type: string + responseCode: + description: "Default response code is 302, which can be customised using this attribute" + type: integer + minimum: 100 + maximum: 599 + oneOf: + - required: ["backend"] + - required: ["redirect"] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: httproutes.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + group: citrix.com + version: v1alpha1 + names: + kind: HTTPRoute + plural: httproutes + singular: httproute + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + validation: + openAPIV3Schema: + required: [spec] + properties: + spec: + type: object + required: [rules] + properties: + hostname: + type: array + 
description: "List of domain names that share the same route, default is '*'" + minItems: 1 + items: + type: string + description: "Domain name" + rules: + type: array + description: "List Content routing rules with an action defined" + minItems: 1 + items: + type: object + required: [name, action] + properties: + name: + type: string + description: "A name to represent the rule, this is used as an identifier in content routing policy name in ADC" + minLength: 1 + maxLength: 20 + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + match: + type: array + description: "List of rules with same action" + minItems: 1 + items: + type: object + anyOf: + - required: [path] + - required: [headers] + - required: [cookies] + - required: [queryParams] + - required: [method] + - required: [policyExpression] + properties: + path: + type: object + description: "URL Path based content routing" + properties: + prefix: + type: string + description: "URL path matches the prefix expression" + exact: + type: string + description: "URL Path must match exact path" + regex: + type: string + description: "PCRE based regex expression for path matching" + headers: + type: array + description: "List of header for content routing - Must match all the rules- Treated as AND condition if more than 1 rule" + minItems: 1 + items: + type: object + description: "Header details for content routing, Check for existence of a header or header name-value match" + properties: + headerName: + type: object + description: "Header name based content routing, Here existence of header is used for routing" + properties: + exact: + type: string + description: "Header Name - treated as exact must exist" + contains: + type: string + description: "Header Name - A header must exist that contain the string the name" + regex: + type: string + description: "header Name - treated as PCRE regex expression" + not: + type: boolean + description: "Default False, if present, rules are inverted. I.e header name must not exist" + oneOf: + - required: [exact] + - required: [contains] + - required: [regex] + headerValue: + type: object + description: "Header Name and Value based match" + properties: + name: + type: string + description: "Header name that must match the value" + exact: + type: string + description: "Header value - treated as exact" + contains: + type: string + description: "Header value - treated as contains" + regex: + type: string + description: "header value - treated as PCRE regex expression" + not: + type: boolean + description: "Default False, if present, rules are inverted. I.e header if present must not match the value" + oneOf: + - required: [name, exact] + - required: [name, contains] + - required: [name, regex] + queryParams: + type: array + description: "List of Query parameters for content routing - Must match all the rules- Treated as AND condition if more than 1 rule" + minItems: 1 + items: + type: object + description: "Query parameters Name and Value based match" + properties: + name: + type: string + description: "Query name that must match the value. If no value is specified, matches with any value" + exact: + type: string + description: "Query value - Exact match" + contains: + type: string + description: "Query value - value must have the string(substring)" + regex: + type: string + description: "Query value - Value must match this regex patterm" + not: + type: boolean + description: "Default False, if present, rules are inverted. 
I.e query if present must not match the value" + anyOf: + - required: [name] + - oneOf: + - required: [name, exact] + - required: [name, contains] + - required: [name, regex] + cookies: + type: array + description: "List of Cookie params for content routing - Must match all the rules- Treated as AND condition if more than 1 rule" + minItems: 1 + items: + type: object + description: "Cookie based routing" + properties: + name: + type: string + description: "cookie name that must match the value. If no value specified, it matches with any value" + exact: + type: string + description: "cookie value - treated as exact" + contains: + type: string + description: "cookie value - treated as substring" + regex: + type: string + description: "cookie value - treated as PCRE regex expression" + not: + type: boolean + description: "Default False, if present, rules are inverted. I.e cookie if present must not match the value" + anyOf: + - required: [name] + - oneOf: + - required: [name, exact] + - required: [name, contains] + - required: [name, regex] + method: + type: string + description: "HTTP method for content routing eg: POST, PUT, DELETE etc" + policyExpression: + type: string + description: "Citrix ADC policy expressions; refer: https://docs.citrix.com/en-us/netscaler/media/expression-prefix.pdf" + action: + type: object + description: "Action for the matched rule" + properties: + backend: + type: object + oneOf: + - required: [kube] + properties: + kube: + type: object + required: [service, port] + properties: + service: + description: "Name of the backend service" + type: string + pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' + port: + description: "Service port" + type: integer + minimum: 1 + maximum: 65535 + backendConfig: + description: "General backend service options" + properties: + secure_backend: + description: "Use Secure communications to the backends" + type: boolean + lbConfig: + description: "Citrix ADC LB vserver configurations for the backend. Refer: https://developer-docs.citrix.com/projects/netscaler-nitro-api/en/12.0/configuration/load-balancing/lbvserver/lbvserver/ for all configurations" + type: object + additionalProperties: + type: string + servicegroupConfig: + description: "Citrix ADC service group configurations for the backend; Refer: https://developer-docs.citrix.com/projects/netscaler-nitro-api/en/12.0/configuration/basic/servicegroup/servicegroup/ for all configurations" + type: object + additionalProperties: + type: string + redirect: + type: object + oneOf: + - required: [targetExpression] + - required: [hostRedirect] + - required: [httpsRedirect] + properties: + httpsRedirect: + description: "Change the scheme from http to https keeping URL intact" + type: boolean + hostRedirect: + description: "Host name specified is used for redirection with URL intact" + type: string + targetExpression: + description: "A target can be specified using Citrix ADC policy expression" + type: string + responseCode: + description: "Default response code is 302, which can be customised using this attribute" + type: integer + minimum: 100 + maximum: 599 + oneOf: + - required: ["backend"] + - required: ["redirect"] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . 
+ name: continuousdeployments.citrix.com +{{- if .Values.crds.retainOnDelete }} + annotations: + "helm.sh/resource-policy": keep +{{- end }} +spec: + # group name to use for REST API: /apis// + group: citrix.com + # list of versions supported by this CustomResourceDefinition + version: v1 + # - name: v1 + # Each version can be enabled/disabled by Served flag. + # served: true + # One and only one version must be marked as the storage version. + #storage: true + # either Namespaced or Cluster + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Status + type: string + description: "Current Status of the CRD" + JSONPath: .status.state + - name: Message + type: string + description: "Status Message" + JSONPath: .status.status_message + names: + # plural name to be used in the URL: /apis/// + plural: continuousdeployments + # singular name to be used as an alias on the CLI and for display + singular: continuousdeployment + # kind is normally the CamelCased singular type. Your resource manifests use this. + kind: continuousDeploymentCustomConfig + # shortNames allow shorter string to match your resource on the CLI + shortNames: + - crd + + validation: + # openAPIV3Schema is the schema for validating custom objects. + openAPIV3Schema: + properties: + spec: + properties: + cronSpec: + type: integer +--- +{{- end }} \ No newline at end of file diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/citrix-k8s-cpx-ingress.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/citrix-k8s-cpx-ingress.yaml new file mode 100644 index 000000000..d920cb67c --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/citrix-k8s-cpx-ingress.yaml @@ -0,0 +1,221 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cpx-ingress +spec: + selector: + matchLabels: + app: cpx-ingress + replicas: 1 + template: + metadata: + name: cpx-ingress + labels: + app: cpx-ingress + annotations: + spec: + serviceAccountName: cpx-ingress-k8s-role + containers: + - name: cpx-ingress + image: "{{ .Values.image }}" + imagePullPolicy: {{ .Values.pullPolicy }} + securityContext: + privileged: true + env: + - name: "EULA" + value: "{{ .Values.license.accept }}" + - name: "KUBERNETES_TASK_ID" + value: "" +{{- if .Values.ADMSettings.licenseServerIP }} + - name: "LS_IP" + value: {{ .Values.ADMSettings.licenseServerIP | quote }} +{{- end }} +{{- if .Values.ADMSettings.licenseServerPort }} + - name: "LS_PORT" + value: {{ .Values.ADMSettings.licenseServerPort | quote }} +{{- end }} + - name: "MGMT_HTTP_PORT" + value: {{ .Values.mgmtHttpPort | quote }} + - name: "MGMT_HTTPS_PORT" + value: {{ .Values.mgmtHttpsPort | quote }} +{{- if .Values.ADMSettings.ADMIP }} + - name: "NS_MGMT_SERVER" + value: {{ .Values.ADMSettings.ADMIP | quote }} + - name: "NS_MGMT_FINGER_PRINT" + value: {{ .Values.ADMSettings.ADMFingerPrint | quote }} + - name: "NS_HTTP_PORT" + value: {{ .Values.mgmtHttpPort | quote }} + - name: "NS_HTTPS_PORT" + value: {{ .Values.mgmtHttpsPort | quote }} + - name: "LOGSTREAM_COLLECTOR_IP" + value: {{ .Values.ADMSettings.ADMIP | quote }} +{{- end }} +#To povision bandwidth based licensing to Citrix ADC CPX from ADM, needs bandwidth +{{- if and ( .Values.ADMSettings.licenseServerIP ) (eq .Values.ADMSettings.bandWidthLicense true) }} + - name: "BANDWIDTH" + value: {{ required "Mention bandwidth for bandwidth based licensing" 
.Values.ADMSettings.bandWidth | quote }} +{{- end }} +#for multiple-PE support, need to set CPX_CORES +{{- if .Values.ADMSettings.licenseServerIP }} +{{- if or (eq .Values.ADMSettings.vCPULicense true) (eq .Values.ADMSettings.bandWidthLicense true) }} + - name: "CPX_CORES" + value: {{ .Values.ADMSettings.cpxCores | default 1 | quote }} +{{- end }} +{{- end }} +{{- if or (.Values.ADMSettings.ADMIP) (.Values.ADMSettings.licenseServerIP) }} + - name: NS_MGMT_USER + valueFrom: + secretKeyRef: + name: {{ required "Provide Secret for ADM/LicenseServer credentials" .Values.ADMSettings.loginSecret }} + key: username + - name: NS_MGMT_PASS + valueFrom: + secretKeyRef: + name: {{ required "Provide Secret for ADM/LicenseServer credentials" .Values.ADMSettings.loginSecret }} + key: password +{{- end }} + volumeMounts: + - mountPath: /cpx/conf/ + name: cpx-volume1 + - mountPath: /cpx/crash/ + name: cpx-volume2 +{{- if .Values.cic.required }} + # Add cic as a sidecar + - name: cic + image: "{{ .Values.cic.image }}" + imagePullPolicy: {{ .Values.cic.pullPolicy }} + env: + - name: "EULA" + value: "{{ .Values.license.accept }}" + - name: "NS_IP" + value: "127.0.0.1" + - name: "NS_APPS_NAME_PREFIX" + value: {{ .Values.nsNamespace | default "k8s"}} + - name: "NS_DEPLOYMENT_MODE" + value: "SIDECAR" + - name: "NS_ENABLE_MONITORING" + value: "YES" + - name: "NS_USER" + valueFrom: + secretKeyRef: + name: cpxlogin + key: username + - name: "NS_PASSWORD" + valueFrom: + secretKeyRef: + name: cpxlogin + key: password +{{- if .Values.logProxy }} + - name: "NS_LOGPROXY" + value: {{ .Values.logProxy | quote }} +{{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace +{{- if .Values.kubernetesURL }} + - name: "kubernetes_url" + value: "{{ .Values.kubernetesURL }}" +{{- end }} + args: + - --configmap + {{ .Release.Namespace }}/cpx-cic-configmap +{{- if .Values.ingressClass }} + - --ingress-class +{{- range .Values.ingressClass}} + {{.}} +{{- end }} +{{- end }} +{{- if .Values.defaultSSLCert }} + - --default-ssl-certificate + {{ .Release.Namespace }}/{{ .Values.defaultSSLCert }} +{{- end }} +{{- end }} +{{- if .Values.exporter.required }} + - name: exporter + image: "{{ .Values.exporter.image }}" + imagePullPolicy: {{ .Values.exporter.pullPolicy }} + args: + - "--secure=no" + - "--target-nsip=127.0.0.1" + - "--port={{ .Values.exporter.ports.containerPort }}" + env: + - name: "NS_USER" + valueFrom: + secretKeyRef: + name: cpxlogin + key: username + - name: "NS_PASSWORD" + valueFrom: + secretKeyRef: + name: cpxlogin + key: password + securityContext: + readOnlyRootFilesystem: true +{{- end }} + volumes: + - name: cpx-volume1 + emptyDir: {} + - name: cpx-volume2 + emptyDir: {} +{{- if and .Values.nodeSelector.key .Values.nodeSelector.value }} + nodeSelector: + {{ .Values.nodeSelector.key }}: {{ .Values.nodeSelector.value }} +{{- end }} + +--- + +apiVersion: v1 +kind: Service +metadata: + name: cpx-service + labels: + app: cpx-service + service-type: citrix-adc-cpx-monitor +spec: + type: NodePort + ports: + - port: 80 + protocol: TCP + name: http + - port: 443 + protocol: TCP + name: https +{{- if .Values.exporter.required }} + - port: {{ .Values.exporter.ports.containerPort }} + targetPort: {{ .Values.exporter.ports.containerPort }} + name: exporter-port +{{- end }} + selector: + app: cpx-ingress + +--- + +{{- if .Values.exporter.required }} + +apiVersion: monitoring.coreos.com/v1 
+kind: ServiceMonitor +metadata: + name: citrix-adc-cpx-servicemonitor + labels: + servicemonitor: citrix-adc-cpx +spec: + endpoints: + - interval: 30s + port: exporter-port + selector: + matchLabels: + service-type: citrix-adc-cpx-monitor + namespaceSelector: + matchNames: + - monitoring + - default + - {{ .Release.Namespace }} + +{{- end }} diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/configmap.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/configmap.yaml new file mode 100644 index 000000000..dd0c1bbee --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/configmap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cpx-cic-configmap + labels: + app: citrix-ingress-controller +data: + LOGLEVEL: {{ .Values.logLevel | quote | lower }} + NS_PROTOCOL: "http" + NS_PORT: "80" + NS_HTTP2_SERVER_SIDE: {{ .Values.http2ServerSide | quote | upper }} +{{- if .Values.coeConfig.required }} + NS_ANALYTICS_CONFIG: | + distributed_tracing: + enable: {{ .Values.coeConfig.distributedTracing.enable | quote }} + samplingrate: {{ .Values.coeConfig.distributedTracing.samplingrate }} + endpoint: + server: {{ include "analytics.server" . | quote }} + timeseries: + port: {{ .Values.coeConfig.timeseries.port }} + metrics: + enable: {{ .Values.coeConfig.timeseries.metrics.enable | quote }} + mode: {{ .Values.coeConfig.timeseries.metrics.mode | quote }} + auditlogs: + enable: {{ .Values.coeConfig.timeseries.auditlogs.enable | quote }} + events: + enable: {{ .Values.coeConfig.timeseries.events.enable | quote }} + transactions: + enable: {{ .Values.coeConfig.transactions.enable | quote }} + port: {{ .Values.coeConfig.transactions.port }} +{{- end }} diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/login_credentials.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/login_credentials.yaml new file mode 100644 index 000000000..0e22ef9dd --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/login_credentials.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cpxlogin +type: Opaque +data: + username: bnNyb290 + password: bnNyb290 diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/rbac.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/rbac.yaml new file mode 100644 index 000000000..66482380d --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/templates/rbac.yaml @@ -0,0 +1,73 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cpx-ingress-k8s-role +rules: + - apiGroups: [""] +{{- if .Values.openshift }} + resources: ["endpoints", "ingresses", "pods", "secrets", "routes", "tokenreviews", "subjectaccessreviews", "nodes", "namespaces", "configmaps"] +{{- else }} + resources: ["endpoints", "ingresses", "pods", "secrets", "routes", "nodes", "namespaces", "configmaps"] +{{- end}} + verbs: ["get", "list", "watch"] + # services/status is needed to update the loadbalancer IP in service status for integrating + # service of type LoadBalancer with external-dns + - apiGroups: [""] + resources: ["services/status"] + verbs: ["patch"] + - apiGroups: 
[""] + resources: ["services"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] + - apiGroups: ["extensions","networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions","networking.k8s.io"] + resources: ["ingresses/status"] + verbs: ["patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch"] + - apiGroups: ["citrix.com"] + resources: ["rewritepolicies", "continuousdeployments", "authpolicies", "ratelimits", "listeners", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["citrix.com"] + resources: ["rewritepolicies/status", "continuousdeployments/status", "authpolicies/status", "ratelimits/status", "listeners/status", "httproutes/status"] + verbs: ["get", "list", "patch"] + - apiGroups: ["citrix.com"] + resources: ["vips"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["route.openshift.io"] + resources: ["routes"] + verbs: ["get", "list", "watch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: cpx-ingress-k8s-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cpx-ingress-k8s-role +subjects: +- kind: ServiceAccount + name: cpx-ingress-k8s-role + namespace: {{ .Release.Namespace }} +apiVersion: rbac.authorization.k8s.io/v1 + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cpx-ingress-k8s-role + namespace: {{ .Release.Namespace }} + +--- diff --git a/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/values.yaml b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/values.yaml new file mode 100644 index 000000000..cda67583c --- /dev/null +++ b/charts/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller/1.8.2800/values.yaml @@ -0,0 +1,81 @@ +# Default values for citrix-cpx-with-ingress-controller. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# cpximage contains information needed to fetch CPX image +image: quay.io/citrix/citrix-k8s-cpx-ingress:13.0-58.30 +pullPolicy: IfNotPresent +# cicimage contains information needed to fetch CIC image +cic: + image: quay.io/citrix/citrix-k8s-ingress-controller:1.8.28 + pullPolicy: IfNotPresent + required: true + +mgmtHttpPort: 9080 + +mgmtHttpsPort: 9443 +# openshift is set to true if charts are being deployed in OpenShift environment. 
+openshift: false +# nsNamespace is the prefix for the resources on the Citrix ADC +nsNamespace: +# license is used to accept the terms of the Citrix license +license: + accept: no +# ingressClass is the name of the Ingress Class +ingressClass: +# logLevel is used to set the level of CIC logs +logLevel: DEBUG +# Default SSL certificate +defaultSSLCert: +# Elasticsearch or Kafka or Zipkin endpoint for Citrix observability exporter +logProxy: +# Set to ON to enable HTTP2 for Citrix ADC service group configurations +http2ServerSide: "OFF" +nodeSelector: + key: + value: + +ADMSettings: + licenseServerIP: + licenseServerPort: 27000 + ADMIP: + ADMFingerPrint: + loginSecret: + bandWidthLicense: false + bandWidth: + vCPULicense: false + cpxCores: + +# exporter contains information about the prometheus-exporter +exporter: + required: false + image: quay.io/citrix/citrix-adc-metrics-exporter:1.4.4 + pullPolicy: IfNotPresent + ports: + containerPort: 8888 + +coeConfig: + required: false + distributedTracing: + enable: false + samplingrate: 100 + endpoint: + server: + timeseries: + port: 5563 + metrics: + enable: false + mode: 'avro' + auditlogs: + enable: false + events: + enable: false + transactions: + enable: false + port: 5557 + +crds: +# If false, CustomResourceDefinitions will not be installed. + install: true +# If set to true, then CustomResourceDefinitions will not be deleted during helm delete. This way, CustomResourceObjects will not be deleted from the database. + retainOnDelete: false diff --git a/charts/cloudcasa/cloudcasa/0.1.000/Chart.yaml b/charts/cloudcasa/cloudcasa/0.1.000/Chart.yaml new file mode 100644 index 000000000..3eaf82672 --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/Chart.yaml @@ -0,0 +1,19 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/namespace: cloudcasa-io + catalog.cattle.io/release-name: cloudcasa +apiVersion: v2 +appVersion: 0.1.0 +description: CloudCasa backup service for Kubernetes and cloud native applications +home: https://cloudcasa.io +icon: https://partner-charts.rancher.io/assets/logos/cloudcasa.png +keywords: +- backup +- Catalogic +- CloudCasa +kubeVersion: '>=1.13.0-0' +maintainers: +- email: info@catalogicsoftware.com + name: catalogicsoftware +name: cloudcasa +version: 0.1.000 diff --git a/charts/cloudcasa/cloudcasa/0.1.000/README.md b/charts/cloudcasa/cloudcasa/0.1.000/README.md new file mode 100644 index 000000000..f829ec17e --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/README.md @@ -0,0 +1,51 @@ +# CloudCasa Kubernetes Agent + +[CloudCasa](https://cloudcasa.io) - A Smart Home in the Cloud for Kubernetes Backups + +# Introduction + +CloudCasa is a SaaS solution that provides class-leading data protection services for Kubernetes and cloud native applications. +Configuration is quick and easy, and basic service is free. + +This Helm chart installs and configures the CloudCasa agent on a Kubernetes cluster. +See the CloudCasa [Getting Started Guide](https://cloudcasa.io/get-started) for more information. + +## Prerequisites + +1. Kubernetes 1.17+ +2. Helm 3.0+ + +## Installation + +### Rancher Installation (Apps & Marketplace) + +1. Log in to https://home.cloudcasa.io and add your Kubernetes cluster under the Setup tab. Note the returned cluster ID. +2. Go to Apps & Marketplace in the Rancher UI. In the Deploy Chart section, check the Partners checkbox and click on the cloudcasa chart. +3. Provide a Name (e.g. CloudCasa) and an optional description. +4.
In the CloudCasa Configuration section, provide the Cluster ID obtained above. +5. Click on the Install button to complete installation of the agent. + +### Helm CLI Installation + +1. Log in to https://home.cloudcasa.io and add your Kubernetes cluster under the Setup tab. Note the returned cluster ID. +2. Execute the following helm commands, replacing `<cluster_id>` with the Cluster ID obtained above: +``` +$ helm repo add cloudcasa-repo https://catalogicsoftware.github.io/cloudcasa-helmchart +$ helm install cloudcasa.io cloudcasa-repo/cloudcasa-helmchart --set cluster_id=<cluster_id> +``` +This will install the CloudCasa agent and complete registration of the cluster with the CloudCasa service. + +## Updating the CloudCasa Agent +1. Log in to https://home.cloudcasa.io and obtain the cluster ID for your cluster by selecting it under the Setup tab. +2. Execute the following commands to update the agent: +``` +$ helm repo update +$ helm upgrade cloudcasa.io cloudcasa-repo/cloudcasa-helmchart --set cluster_id=<cluster_id> +``` + +## Uninstalling the CloudCasa Agent +``` +$ helm uninstall cloudcasa.io +``` + +*CloudCasa is a trademark of Catalogic Software Inc.* diff --git a/charts/cloudcasa/cloudcasa/0.1.000/app-readme.md b/charts/cloudcasa/cloudcasa/0.1.000/app-readme.md new file mode 100644 index 000000000..bae0de627 --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/app-readme.md @@ -0,0 +1,6 @@ +# CloudCasa Kubernetes Agent + +### [CloudCasa](https://cloudcasa.io) - A Smart Home in the Cloud for Kubernetes Backups + +CloudCasa is a SaaS solution that provides class-leading data protection services for Kubernetes and cloud native applications. +Configuration is quick and easy, and basic service is free. See the CloudCasa [Getting Started Guide](https://cloudcasa.io/get-started) for more information. diff --git a/charts/cloudcasa/cloudcasa/0.1.000/questions.yaml b/charts/cloudcasa/cloudcasa/0.1.000/questions.yaml new file mode 100644 index 000000000..d4443f94f --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/questions.yaml @@ -0,0 +1,13 @@ +namespace: cloudcasa-io +labels: + io.rancher.certified: partner +categories: +- Application +questions: +- variable: cluster_id + default: "" + description: "CloudCasa Cluster ID" + type: string + required: true + label: CLUSTER ID + group: "CloudCasa Configuration" diff --git a/charts/cloudcasa/cloudcasa/0.1.000/templates/NOTES.txt b/charts/cloudcasa/cloudcasa/0.1.000/templates/NOTES.txt new file mode 100644 index 000000000..e30a0df5a --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/templates/NOTES.txt @@ -0,0 +1,5 @@ +--------Please be patient while the chart is being deployed-------- + +Tip: Watch the App deployment status using the command: kubectl get pods -n cloudcasa-io + +Monitor the CloudCasa UI; the registered cluster state should move to the Ready state. If the cluster is still in the Pending state, the wrong Cluster ID may have been provided. diff --git a/charts/cloudcasa/cloudcasa/0.1.000/templates/_helpers.tpl b/charts/cloudcasa/cloudcasa/0.1.000/templates/_helpers.tpl new file mode 100644 index 000000000..75bf8467f --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cloudcasa.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cloudcasa.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cloudcasa.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/cloudcasa/cloudcasa/0.1.000/templates/cluster-register.yaml b/charts/cloudcasa/cloudcasa/0.1.000/templates/cluster-register.yaml new file mode 100644 index 000000000..57552aa2e --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/templates/cluster-register.yaml @@ -0,0 +1,2982 @@ +# +# The velero part of the agent spec has been generated using the following +# command: +# +# velero install \ +# --features=EnableCSI \ +# --no-default-backup-location=true \ +# --namespace=cloudcasa-io \ +# --use-volume-snapshots=false \ +# --no-secret \ +# --plugins velero/velero-plugin-for-aws \ +# --dry-run -o yaml > ./intermediate-template.yaml +# ----------------------------------------------------------------------- +# Steps to delete the agent deployment are: +# ----------------------------------------------------------------------- +# kubectl delete namespace/cloudcasa-io clusterrolebinding/cloudcasa-io +# kubectl delete crds -l component=kubeagent_backup_helper + +{{ if not (lookup "v1" "Namespace" .Values.namespace "cloudcasa-io") }} + +apiVersion: v1 +kind: Namespace +metadata: + name: cloudcasa-io + +{{ end }} +--- +apiVersion: v1 +items: +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: backups.velero.io + spec: + group: velero.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Backup is a Velero resource that respresents the capture of Kubernetes cluster state at a point in time (API objects and associated volume state). + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the specification for a Velero backup. 
+ properties: + defaultVolumesToRestic: + description: DefaultVolumesToRestic specifies whether restic should be used to take a backup of all pod volumes by default. + type: boolean + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. + type: string + post: + description: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. 
+ properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of resources of specific Kind. The map key is the Kind name and value is a list of resource names separeted by commas. Each resource name has format "namespace/resourcename". For cluster resources, simply use "resourcename". + nullable: true + type: object + snapshotVolumes: + description: SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. + type: string + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. + items: + type: string + type: array + type: object + status: + description: BackupStatus captures the current status of a Velero backup. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + errors: + description: Errors is a count of all error messages that were generated during execution of the backup. The actual errors are in the backup's log file in object storage. + type: integer + expiration: + description: Expiration is when this Backup is eligible for garbage-collection. + format: date-time + nullable: true + type: string + formatVersion: + description: FormatVersion is the backup format version, including major, minor, and patch version. + type: string + phase: + description: Phase is the current state of the Backup. + enum: + - New + - FailedValidation + - InProgress + - Completed + - PartiallyFailed + - Failed + - Deleting + type: string + progress: + description: Progress contains information about the backup's execution progress. Note that this information is best-effort only -- if Velero fails to update it during a backup for any reason, it may be inaccurate/stale. + nullable: true + properties: + itemsBackedUp: + description: ItemsBackedUp is the number of items that have actually been written to the backup tarball so far. + type: integer + totalItems: + description: TotalItems is the total number of items to be backed up. This number may change throughout the execution of the backup due to plugins that return additional related items to back up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed. + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. 
The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable). + items: + type: string + nullable: true + type: array + version: + description: 'Version is the backup format major version. Deprecated: Please see FormatVersion' + type: integer + volumeSnapshotsAttempted: + description: VolumeSnapshotsAttempted is the total number of attempted volume snapshots for this backup. + type: integer + volumeSnapshotsCompleted: + description: VolumeSnapshotsCompleted is the total number of successfully completed volume snapshots for this backup. + type: integer + warnings: + description: Warnings is a count of all warning messages that were generated during execution of the backup. The actual warnings are in the backup's log file in object storage. + type: integer + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: backupstoragelocations.velero.io + spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Backup Storage Location status such as Available/Unavailable + name: Phase + type: string + - JSONPath: .status.lastValidationTime + description: LastValidationTime is the last time the backup store location was validated + name: Last Validated + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: velero.io + names: + kind: BackupStorageLocation + listKind: BackupStorageLocationList + plural: backupstoragelocations + shortNames: + - bsl + singular: backupstoragelocation + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: BackupStorageLocation is a location where Velero stores backup objects + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupStorageLocationSpec defines the desired state of a Velero BackupStorageLocation + properties: + accessMode: + description: AccessMode defines the permissions for the backup storage location. + enum: + - ReadOnly + - ReadWrite + type: string + backupSyncPeriod: + description: BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync. + nullable: true + type: string + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + objectStorage: + description: ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage. 
+ properties: + bucket: + description: Bucket is the bucket to use for object storage. + type: string + caCert: + description: CACert defines a CA bundle to use when verifying TLS connections to the provider. + format: byte + type: string + prefix: + description: Prefix is the path inside a bucket to use for Velero storage. Optional. + type: string + required: + - bucket + type: object + provider: + description: Provider is the provider of the backup storage. + type: string + validationFrequency: + description: ValidationFrequency defines how frequently to validate the corresponding object storage. A value of 0 disables validation. + nullable: true + type: string + required: + - objectStorage + - provider + type: object + status: + description: BackupStorageLocationStatus defines the observed state of BackupStorageLocation + properties: + accessMode: + description: "AccessMode is an unused field. \n Deprecated: there is now an AccessMode field on the Spec and this field will be removed entirely as of v2.0." + enum: + - ReadOnly + - ReadWrite + type: string + lastSyncedRevision: + description: "LastSyncedRevision is the value of the `metadata/revision` file in the backup storage location the last time the BSL's contents were synced into the cluster. \n Deprecated: this field is no longer updated or used for detecting changes to the location's contents and will be removed entirely in v2.0." + type: string + lastSyncedTime: + description: LastSyncedTime is the last time the contents of the location were synced into the cluster. + format: date-time + nullable: true + type: string + lastValidationTime: + description: LastValidationTime is the last time the backup store location was validated the cluster. + format: date-time + nullable: true + type: string + phase: + description: Phase is the current state of the BackupStorageLocation. + enum: + - Available + - Unavailable + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: deletebackuprequests.velero.io + spec: + group: velero.io + names: + kind: DeleteBackupRequest + listKind: DeleteBackupRequestList + plural: deletebackuprequests + singular: deletebackuprequest + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: DeleteBackupRequest is a request to delete one or more backups. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DeleteBackupRequestSpec is the specification for which backups to delete. 
+ properties: + backupName: + type: string + required: + - backupName + type: object + status: + description: DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. + properties: + errors: + description: Errors contains any errors that were encountered during the deletion process. + items: + type: string + nullable: true + type: array + phase: + description: Phase is the current state of the DeleteBackupRequest. + enum: + - New + - InProgress + - Processed + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: downloadrequests.velero.io + spec: + group: velero.io + names: + kind: DownloadRequest + listKind: DownloadRequestList + plural: downloadrequests + singular: downloadrequest + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: DownloadRequest is a request to download an artifact from backup object storage, such as a backup log file. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DownloadRequestSpec is the specification for a download request. + properties: + target: + description: Target is what to download (e.g. logs for a backup). + properties: + kind: + description: Kind is the type of file to download. + enum: + - BackupLog + - BackupContents + - BackupVolumeSnapshots + - BackupResourceList + - RestoreLog + - RestoreResults + type: string + name: + description: Name is the name of the kubernetes resource with which the file is associated. + type: string + required: + - kind + - name + type: object + required: + - target + type: object + status: + description: DownloadRequestStatus is the current status of a DownloadRequest. + properties: + downloadURL: + description: DownloadURL contains the pre-signed URL for the target file. + type: string + expiration: + description: Expiration is when this DownloadRequest expires and can be deleted by the system. + format: date-time + nullable: true + type: string + phase: + description: Phase is the current state of the DownloadRequest. 
+ enum: + - New + - Processed + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: podvolumebackups.velero.io + spec: + group: velero.io + names: + kind: PodVolumeBackup + listKind: PodVolumeBackupList + plural: podvolumebackups + singular: podvolumebackup + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeBackupSpec is the specification for a PodVolumeBackup. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage location where the restic repository is stored. + type: string + node: + description: Node is the name of the node that the Pod is running on. + type: string + pod: + description: Pod is a reference to the pod containing the volume to be backed up. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + repoIdentifier: + description: RepoIdentifier is the restic repository identifier. 
+ type: string + tags: + additionalProperties: + type: string + description: Tags are a map of key-value pairs that should be applied to the volume backup as tags. + type: object + volume: + description: Volume is the name of the volume within the Pod to be backed up. + type: string + required: + - backupStorageLocation + - node + - pod + - repoIdentifier + - volume + type: object + status: + description: PodVolumeBackupStatus is the current status of a PodVolumeBackup. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume backup's status. + type: string + path: + description: Path is the full path within the controller pod being backed up. + type: string + phase: + description: Phase is the current state of the PodVolumeBackup. + enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the volume and the current number of backed up bytes. This can be used to display progress information about the backup operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + snapshotID: + description: SnapshotID is the identifier for the snapshot of the pod volume. + type: string + startTimestamp: + description: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: podvolumerestores.velero.io + spec: + group: velero.io + names: + kind: PodVolumeRestore + listKind: PodVolumeRestoreList + plural: podvolumerestores + singular: podvolumerestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeRestoreSpec is the specification for a PodVolumeRestore. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage location where the restic repository is stored. + type: string + pod: + description: Pod is a reference to the pod containing the volume to be restored. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + repoIdentifier: + description: RepoIdentifier is the restic repository identifier. + type: string + snapshotID: + description: SnapshotID is the ID of the volume snapshot to be restored. + type: string + volume: + description: Volume is the name of the volume within the Pod to be restored. + type: string + required: + - backupStorageLocation + - pod + - repoIdentifier + - snapshotID + - volume + type: object + status: + description: PodVolumeRestoreStatus is the current status of a PodVolumeRestore. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a restore was completed. Completion time is recorded even on failed restores. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume restore's status. + type: string + phase: + description: Phase is the current state of the PodVolumeRestore. + enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the snapshot and the current number of restored bytes. This can be used to display progress information about the restore operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a restore was started. 
The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: resticrepositories.velero.io + spec: + group: velero.io + names: + kind: ResticRepository + listKind: ResticRepositoryList + plural: resticrepositories + singular: resticrepository + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ResticRepositorySpec is the specification for a ResticRepository. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the BackupStorageLocation that should contain this repository. + type: string + maintenanceFrequency: + description: MaintenanceFrequency is how often maintenance should be run. + type: string + resticIdentifier: + description: ResticIdentifier is the full restic-compatible string for identifying this repository. + type: string + volumeNamespace: + description: VolumeNamespace is the namespace this restic repository contains pod volume backups for. + type: string + required: + - backupStorageLocation + - maintenanceFrequency + - resticIdentifier + - volumeNamespace + type: object + status: + description: ResticRepositoryStatus is the current status of a ResticRepository. + properties: + lastMaintenanceTime: + description: LastMaintenanceTime is the last time maintenance was run. + format: date-time + nullable: true + type: string + message: + description: Message is a message about the current status of the ResticRepository. + type: string + phase: + description: Phase is the current state of the ResticRepository. + enum: + - New + - Ready + - NotReady + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: restores.velero.io + spec: + group: velero.io + names: + kind: Restore + listKind: RestoreList + plural: restores + singular: restore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Restore is a Velero resource that represents the application of resources from a Velero backup to a target Kubernetes cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSpec defines the specification for a Velero restore. + properties: + backupName: + description: BackupName is the unique name of the Velero backup to restore from. + type: string + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the restore. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the restore. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed during or post restore. + properties: + resources: + items: + description: RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. + type: string + postHooks: + description: PostHooks is a list of RestoreResourceHooks to execute during and after restoring a resource. + items: + description: RestoreResourceHook defines a restore hook for a resource. + properties: + exec: + description: Exec defines an exec restore hook. + properties: + command: + description: Command is the command and arguments to execute from within a container after a pod has been restored. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + execTimeout: + description: ExecTimeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + waitTimeout: + description: WaitTimeout defines the maximum amount of time Velero should wait for the container to be Ready before attempting to run the command. + type: string + required: + - command + type: object + init: + description: Init defines an init restore hook. + properties: + initContainers: + description: InitContainers is list of init containers to be added to a pod during its restore. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. 
More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. 
If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. 
+ items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the initContainers to complete. + type: string + type: object + type: object + type: array + required: + - name + type: object + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the restore. If null, defaults to true. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the restore. If empty, all resources in the backup are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when restoring individual objects from the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceMapping: + additionalProperties: + type: string + description: NamespaceMapping is a map of source namespace names to target namespace names to restore into. Any source namespaces not included in the map will be restored into namespaces of the same name. + type: object + restorePVs: + description: RestorePVs specifies whether to restore all included PVs from snapshot (via the cloudprovider). + nullable: true + type: boolean + scheduleName: + description: ScheduleName is the unique name of the Velero schedule to restore from. If specified, and BackupName is empty, Velero will restore from the most recent successful backup created from this schedule. + type: string + required: + - backupName + type: object + status: + description: RestoreStatus captures the current status of a Velero restore + properties: + completionTimestamp: + description: CompletionTimestamp records the time the restore operation was completed. Completion time is recorded even on failed restore. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + errors: + description: Errors is a count of all error messages that were generated during execution of the restore. The actual errors are stored in object storage. + type: integer + failureReason: + description: FailureReason is an error that caused the entire restore to fail. + type: string + phase: + description: Phase is the current state of the Restore + enum: + - New + - FailedValidation + - InProgress + - Completed + - PartiallyFailed + - Failed + type: string + startTimestamp: + description: StartTimestamp records the time the restore operation was started. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable) + items: + type: string + nullable: true + type: array + warnings: + description: Warnings is a count of all warning messages that were generated during execution of the restore. The actual warnings are stored in object storage. 
+ type: integer + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: schedules.velero.io + spec: + group: velero.io + names: + kind: Schedule + listKind: ScheduleList + plural: schedules + singular: schedule + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Schedule is a Velero resource that represents a pre-scheduled or periodic Backup that should be run. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the specification for a Velero schedule + properties: + schedule: + description: Schedule is a Cron expression defining when to run the Backup. + type: string + template: + description: Template is the definition of the Backup to be run on the provided schedule + properties: + defaultVolumesToRestic: + description: DefaultVolumesToRestic specifies whether restic should be used to take a backup of all pod volumes by default. + type: boolean + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. 
+ items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. + type: string + post: + description: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. 
+ type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of resources of specific Kind. The map key is the Kind name and value is a list of resource names separated by commas. Each resource name has format "namespace/resourcename". For cluster resources, simply use "resourcename". + nullable: true + type: object + snapshotVolumes: + description: SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. + type: string + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. 
+ items: + type: string + type: array + type: object + required: + - schedule + - template + type: object + status: + description: ScheduleStatus captures the current state of a Velero schedule + properties: + lastBackup: + description: LastBackup is the last time a Backup was run for this Schedule + format: date-time + nullable: true + type: string + phase: + description: Phase is the current phase of the Schedule + enum: + - New + - Enabled + - FailedValidation + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable) + items: + type: string + type: array + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: serverstatusrequests.velero.io + spec: + group: velero.io + names: + kind: ServerStatusRequest + listKind: ServerStatusRequestList + plural: serverstatusrequests + shortNames: + - ssr + singular: serverstatusrequest + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ServerStatusRequest is a request to access current status information about the Velero server. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ServerStatusRequestSpec is the specification for a ServerStatusRequest. + type: object + status: + description: ServerStatusRequestStatus is the current status of a ServerStatusRequest. + properties: + phase: + description: Phase is the current lifecycle phase of the ServerStatusRequest. + enum: + - New + - Processed + type: string + plugins: + description: Plugins list information about the plugins running on the Velero server + items: + description: PluginInfo contains attributes of a Velero plugin + properties: + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + nullable: true + type: array + processedTimestamp: + description: ProcessedTimestamp is when the ServerStatusRequest was processed by the ServerStatusRequestController. + format: date-time + nullable: true + type: string + serverVersion: + description: ServerVersion is the Velero server version. 
+ type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: volumesnapshotlocations.velero.io + spec: + group: velero.io + names: + kind: VolumeSnapshotLocation + listKind: VolumeSnapshotLocationList + plural: volumesnapshotlocations + singular: volumesnapshotlocation + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: VolumeSnapshotLocation is a location where Velero stores volume snapshots. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation. + properties: + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + provider: + description: Provider is the provider of the volume storage. + type: string + required: + - provider + type: object + status: + description: VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation. + properties: + phase: + description: VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation. + enum: + - Available + - Unavailable + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: v1 + kind: ServiceAccount + metadata: + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: {{ .Release.Name }} + namespace: {{ .Values.namespace }} +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: {{ .Release.Name }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: {{ .Release.Name }} + namespace: {{ .Values.namespace }} +kind: List +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .driver + name: Driver + type: string + - JSONPath: .deletionPolicy + description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass + should be deleted when its bound VolumeSnapshot is deleted. 
+ name: DeletionPolicy + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + preserveUnknownFields: false + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that an underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot in bytes + name: RestoreSize + type: integer + - JSONPath: .spec.deletionPolicy + description: Determines whether this VolumeSnapshotContent and its physical snapshot + on the underlying storage system should be deleted when its bound VolumeSnapshot + is deleted. + name: DeletionPolicy + type: string + - JSONPath: .spec.driver + description: Name of the CSI driver used to create the physical snapshot on the + underlying storage system. 
+ name: Driver + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: Name of the VolumeSnapshotClass to which this snapshot belongs. + name: VolumeSnapshotClass + type: string + - JSONPath: .spec.volumeSnapshotRef.name + description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + name: VolumeSnapshot + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken. This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. 
+ type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. 
+ format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .spec.source.persistentVolumeClaimName + description: Name of the source PVC from where a dynamically taken snapshot will + be created. + name: SourcePVC + type: string + - JSONPath: .spec.source.volumeSnapshotContentName + description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + name: SourceSnapshotContent + type: string + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot. + name: RestoreSize + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + name: SnapshotClass + type: string + - JSONPath: .status.boundVolumeSnapshotContentName + description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + name: SnapshotContent + type: string + - JSONPath: .status.creationTime + description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. 
+ name: CreationTime + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' 
+ type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. 
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubeagent + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: kubeagent + strategy: {} + replicas: 1 + template: + metadata: + labels: + app: kubeagent + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "8085" + prometheus.io/scrape: "true" + spec: + containers: + - image: "{{ .Values.kagent_image.repository }}:{{ .Values.kagent_image.tag }}" + args: ["/usr/local/bin/kubeagent", "--server_addr", "agent.cloudcasa.io:443", "--tls", "true"] + name: kubeagent + resources: + requests: + memory: "{{ .Values.request_kubeagent_memory }}" + cpu: "{{ .Values.request_kubeagent_cpu }}" + limits: + memory: "{{ .Values.limit_kubeagent_memory }}" + cpu: "{{ .Values.limit_kubeagent_cpu }}" + env: + - name: AMDS_CLUSTER_ID + value: "{{ .Values.cluster_id }}" + volumeMounts: + - mountPath: /credentials + name: cloud-credentials + - mountPath: /plugins + name: plugins + - mountPath: /scratch + name: scratch + - image: "{{ .Values.velero_image.repository }}:{{ .Values.velero_image.tag }}" + imagePullPolicy: IfNotPresent + name: kubeagent-backup-helper + args: + - server + - --features=EnableCSI,EnableAPIGroupVersions + - --backup-sync-period=0s + - --store-validation-frequency=0s + # (24 hours * 365 days * 10 years) + (2 * 24 hours[for leap years]) = 87648 hours + - --default-backup-ttl=87648h0m0s + - --disable-controllers=backup-sync,schedule,gc,download-request,restic-repo,server-status-request + - --log-format=json + - --log-level=info + command: + - /velero + env: + - name: VELERO_SCRATCH_DIR + value: /scratch + - name: VELERO_NAMESPACE + value: {{ .Values.namespace }} + - name: LD_LIBRARY_PATH + value: /plugins + - name: AWS_SHARED_CREDENTIALS_FILE + value: /credentials/s3/cloud + ports: + - containerPort: 8085 + name: metrics + resources: + limits: + cpu: "{{ .Values.limit_velero_cpu }}" + memory: "{{ .Values.limit_velero_memory }}" + requests: + cpu: "{{ .Values.request_velero_cpu }}" + memory: "{{ .Values.request_velero_memory }}" + volumeMounts: + - mountPath: /plugins + name: plugins + - mountPath: /scratch + name: scratch + - mountPath: /credentials + name: cloud-credentials + initContainers: + - image: "{{ .Values.velero_aws_plugin_image.repository }}:{{ .Values.velero_aws_plugin_image.tag }}" + imagePullPolicy: IfNotPresent + name: velero-plugin-for-aws + resources: {} + volumeMounts: + - mountPath: /target + name: plugins + - image: "{{ .Values.velero_csi_plugin_image.repository }}:{{ .Values.velero_csi_plugin_image.tag }}" + imagePullPolicy: IfNotPresent + name: velero-plugin-for-csi + resources: {} + volumeMounts: + - mountPath: /target + name: plugins + restartPolicy: Always + serviceAccountName: {{ .Release.Name }} + volumes: + - emptyDir: {} + name: plugins + - emptyDir: {} + name: scratch + - emptyDir: {} + name: cloud-credentials diff --git a/charts/cloudcasa/cloudcasa/0.1.000/values.yaml b/charts/cloudcasa/cloudcasa/0.1.000/values.yaml new file mode 100644 index 000000000..8ce3850b6 --- /dev/null +++ b/charts/cloudcasa/cloudcasa/0.1.000/values.yaml @@ -0,0 +1,92 @@ +## Global Docker image parameters +## Please, 
note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry
+##
+
+## Required namespace
+namespace: cloudcasa-io
+
+## Velero csi plugin image
+## ref: https://hub.docker.com/r/velero/velero-plugin-for-csi/tags/
+velero_csi_plugin_image:
+  registry: docker.io
+  repository: catalogicsoftware/velero-plugin-for-csi
+  tag: v0.1.2.1
+  pullPolicy: IfNotPresent
+  debug: false
+##
+
+## Velero aws plugin image
+## ref: https://hub.docker.com/r/velero/velero-plugin-for-aws/tags/
+velero_aws_plugin_image:
+  registry: docker.io
+  repository: velero/velero-plugin-for-aws
+  tag: v1.1.0
+  pullPolicy: IfNotPresent
+  debug: false
+##
+
+## Velero image
+## ref: https://hub.docker.com/r/velero/velero/tags
+velero_image:
+  registry: docker.io
+  repository: velero/velero
+  tag: v1.5.3
+  pullPolicy: IfNotPresent
+  debug: false
+##
+
+## Cloudcasa kubeagent image
+## ref: https://hub.docker.com/r/catalogicsoftware/amds-kagent/tags/
+kagent_image:
+  registry: docker.io
+  repository: catalogicsoftware/amds-kagent
+  tag: 0.1.0-rc.515
+  pullPolicy: IfNotPresent
+  debug: false
+##
+
+## Cloudcasa AMDS Cluster ID. To be provided by the user
+cluster_id: ""
+
+## Resources to be used by velero and kubeagent pods
+request_velero_memory: 128Mi
+request_velero_cpu: 500m
+limit_velero_memory: 512Mi
+limit_velero_cpu: 1
+request_kubeagent_memory: 32Mi
+request_kubeagent_cpu: 250m
+limit_kubeagent_memory: 64Mi
+limit_kubeagent_cpu: 500m
+
+## Placeholder configuration below this line ##
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  create: true
+  annotations: {}
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+
+securityContext: {}
+
+resources: {}
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 1
+  targetCPUUtilizationPercentage: 80
+  targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+##
diff --git a/charts/cloudcasa/cloudcasa/1/Chart.yaml b/charts/cloudcasa/cloudcasa/1/Chart.yaml
new file mode 100644
index 000000000..e3f23b798
--- /dev/null
+++ b/charts/cloudcasa/cloudcasa/1/Chart.yaml
@@ -0,0 +1,20 @@
+annotations:
+  catalog.cattle.io/certified: partner
+  catalog.cattle.io/display-name: Cloudcasa
+  catalog.cattle.io/namespace: cloudcasa-io
+  catalog.cattle.io/release-name: cloudcasa
+apiVersion: v2
+appVersion: "1.0"
+description: CloudCasa backup service for Kubernetes and cloud native applications
+home: https://cloudcasa.io
+icon: https://partner-charts.rancher.io/assets/logos/cloudcasa.png
+keywords:
+- backup
+- Catalogic
+- CloudCasa
+kubeVersion: '>=1.13.0-0'
+maintainers:
+- email: info@catalogicsoftware.com
+  name: catalogicsoftware
+name: cloudcasa
+version: "1"
diff --git a/charts/cloudcasa/cloudcasa/1/README.md b/charts/cloudcasa/cloudcasa/1/README.md
new file mode 100644
index 000000000..f829ec17e
--- /dev/null
+++ b/charts/cloudcasa/cloudcasa/1/README.md
@@ -0,0 +1,51 @@
+# CloudCasa Kubernetes Agent
+
+[CloudCasa](https://cloudcasa.io) - A Smart Home in the Cloud for Kubernetes Backups
+
+# Introduction
+
+CloudCasa is a SaaS solution that provides class-leading data protection services for Kubernetes and cloud native applications.
+Configuration is quick and easy, and basic service is free.
+
+This Helm chart installs and configures the CloudCasa agent on a Kubernetes cluster.
+See the CloudCasa [Getting Started Guide](https://cloudcasa.io/get-started) for more information.
+
+## Prerequisites
+
+1. Kubernetes 1.17+
+2. Helm 3.0+
+
+## Installation
+
+### Rancher Installation (Apps & Marketplace)
+
+1. Log in to https://home.cloudcasa.io and add your Kubernetes cluster under the Setup tab. Note the returned cluster ID.
+2. Go to Apps & Marketplace in the Rancher UI. In the Deploy Chart section, check the Partners checkbox and click on the cloudcasa chart.
+3. Provide a Name (e.g. CloudCasa) and an optional description.
+4. In the CloudCasa Configuration section, provide the Cluster ID obtained above.
+5. Click on the Install button to complete installation of the agent.
+
+### Helm CLI Installation
+
+1. Log in to https://home.cloudcasa.io and add your Kubernetes cluster under the Setup tab. Note the returned cluster ID.
+2. Execute the following helm commands, replacing `<cluster_id>` with the Cluster ID obtained above:
+```
+$ helm repo add cloudcasa-repo https://catalogicsoftware.github.io/cloudcasa-helmchart
+$ helm install cloudcasa.io cloudcasa-repo/cloudcasa-helmchart --set cluster_id=<cluster_id>
+```
+This will install the CloudCasa agent and complete registration of the cluster with the CloudCasa service.
+
+## Updating the CloudCasa Agent
+1. Log in to https://home.cloudcasa.io and obtain the cluster ID for your cluster by selecting it under the Setup tab.
+2. Execute the following commands to update the agent, replacing `<cluster_id>` with the Cluster ID obtained above:
+```
+$ helm repo update
+$ helm upgrade cloudcasa.io cloudcasa-repo/cloudcasa-helmchart --set cluster_id=<cluster_id>
+```
+
+## Uninstalling the CloudCasa Agent
+```
+$ helm uninstall cloudcasa.io
+```
+
+*CloudCasa is a trademark of Catalogic Software Inc.*
diff --git a/charts/cloudcasa/cloudcasa/1/app-readme.md b/charts/cloudcasa/cloudcasa/1/app-readme.md
new file mode 100644
index 000000000..bae0de627
--- /dev/null
+++ b/charts/cloudcasa/cloudcasa/1/app-readme.md
@@ -0,0 +1,6 @@
+# CloudCasa Kubernetes Agent
+
+### [CloudCasa](https://cloudcasa.io) - A Smart Home in the Cloud for Kubernetes Backups
+
+CloudCasa is a SaaS solution that provides class-leading data protection services for Kubernetes and cloud native applications.
+Configuration is quick and easy, and basic service is free. See the CloudCasa [Getting Started Guide](https://cloudcasa.io/get-started) for more information.
diff --git a/charts/cloudcasa/cloudcasa/1/questions.yaml b/charts/cloudcasa/cloudcasa/1/questions.yaml
new file mode 100644
index 000000000..f2f1f51de
--- /dev/null
+++ b/charts/cloudcasa/cloudcasa/1/questions.yaml
@@ -0,0 +1,13 @@
+namespace: cloudcasa-io
+labels:
+  io.rancher.certified: partner
+categories:
+- Application
+questions:
+- variable: cluster_id
+  default: "default"
+  description: "CloudCasa Cluster ID"
+  type: string
+  required: true
+  label: CLUSTER ID
+  group: "CloudCasa Configuration"
diff --git a/charts/cloudcasa/cloudcasa/1/templates/NOTES.txt b/charts/cloudcasa/cloudcasa/1/templates/NOTES.txt
new file mode 100644
index 000000000..e30a0df5a
--- /dev/null
+++ b/charts/cloudcasa/cloudcasa/1/templates/NOTES.txt
@@ -0,0 +1,5 @@
+--------Please be patient while the chart is being deployed--------
+
+Tip: Watch the App deployment status using the command: kubectl get pods -n cloudcasa-io
+
+Monitor the CloudCasa UI; the registered cluster should move to the Ready state. If the cluster remains in the Pending state, the wrong Cluster ID may have been provided.
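For reference only, a minimal sketch of a custom values file that overrides keys defined in the chart's values.yaml shown above; the file name and the cluster ID value are placeholders, and the memory figures are illustrative (the chart defaults are 32Mi/64Mi for the kubeagent container):

```
# my-values.yaml -- hypothetical override file for the CloudCasa chart
# Replace the cluster_id value with the ID shown under the Setup tab in the CloudCasa UI.
cluster_id: "<your cluster ID from the CloudCasa UI>"

# Optionally raise the kubeagent container's memory request/limit.
request_kubeagent_memory: 64Mi
limit_kubeagent_memory: 128Mi
```

Such a file could be passed with `helm install cloudcasa.io cloudcasa-repo/cloudcasa-helmchart -f my-values.yaml` instead of the `--set cluster_id=<cluster_id>` form shown in the README.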
diff --git a/charts/cloudcasa/cloudcasa/1/templates/_helpers.tpl b/charts/cloudcasa/cloudcasa/1/templates/_helpers.tpl new file mode 100644 index 000000000..75bf8467f --- /dev/null +++ b/charts/cloudcasa/cloudcasa/1/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cloudcasa.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cloudcasa.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cloudcasa.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/cloudcasa/cloudcasa/1/templates/cluster-register.yaml b/charts/cloudcasa/cloudcasa/1/templates/cluster-register.yaml new file mode 100644 index 000000000..aae09c659 --- /dev/null +++ b/charts/cloudcasa/cloudcasa/1/templates/cluster-register.yaml @@ -0,0 +1,2984 @@ +# ----------------------------------------------------------------------- +# Steps to delete the agent deployment are: +# ----------------------------------------------------------------------- +# kubectl delete namespace/cloudcasa-io clusterrolebinding/cloudcasa-io +# kubectl delete crds -l component=kubeagent_backup_helper + +{{ if not (lookup "v1" "Namespace" .Values.namespace "cloudcasa-io") }} +apiVersion: v1 +kind: Namespace +metadata: + name: cloudcasa-io + annotations: + "helm.sh/resource-policy": keep +{{ end }} +--- +apiVersion: v1 +kind: List +items: +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: backups.velero.io + spec: + group: velero.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Backup is a Velero resource that respresents the capture of Kubernetes cluster state at a point in time (API objects and associated volume state). + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the specification for a Velero backup. + properties: + defaultVolumesToRestic: + description: DefaultVolumesToRestic specifies whether restic should be used to take a backup of all pod volumes by default. + type: boolean + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. 
+ type: string + post: + description: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of resources of specific Kind. The map key is the Kind name and value is a list of resource names separeted by commas. Each resource name has format "namespace/resourcename". For cluster resources, simply use "resourcename". + nullable: true + type: object + snapshotVolumes: + description: SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. + type: string + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. + items: + type: string + type: array + type: object + status: + description: BackupStatus captures the current status of a Velero backup. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + errors: + description: Errors is a count of all error messages that were generated during execution of the backup. The actual errors are in the backup's log file in object storage. + type: integer + expiration: + description: Expiration is when this Backup is eligible for garbage-collection. + format: date-time + nullable: true + type: string + formatVersion: + description: FormatVersion is the backup format version, including major, minor, and patch version. + type: string + phase: + description: Phase is the current state of the Backup. + enum: + - New + - FailedValidation + - InProgress + - Completed + - PartiallyFailed + - Failed + - Deleting + type: string + progress: + description: Progress contains information about the backup's execution progress. Note that this information is best-effort only -- if Velero fails to update it during a backup for any reason, it may be inaccurate/stale. + nullable: true + properties: + itemsBackedUp: + description: ItemsBackedUp is the number of items that have actually been written to the backup tarball so far. + type: integer + totalItems: + description: TotalItems is the total number of items to be backed up. 
This number may change throughout the execution of the backup due to plugins that return additional related items to back up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed. + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable). + items: + type: string + nullable: true + type: array + version: + description: 'Version is the backup format major version. Deprecated: Please see FormatVersion' + type: integer + volumeSnapshotsAttempted: + description: VolumeSnapshotsAttempted is the total number of attempted volume snapshots for this backup. + type: integer + volumeSnapshotsCompleted: + description: VolumeSnapshotsCompleted is the total number of successfully completed volume snapshots for this backup. + type: integer + warnings: + description: Warnings is a count of all warning messages that were generated during execution of the backup. The actual warnings are in the backup's log file in object storage. + type: integer + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: backupstoragelocations.velero.io + spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Backup Storage Location status such as Available/Unavailable + name: Phase + type: string + - JSONPath: .status.lastValidationTime + description: LastValidationTime is the last time the backup store location was validated + name: Last Validated + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: velero.io + names: + kind: BackupStorageLocation + listKind: BackupStorageLocationList + plural: backupstoragelocations + shortNames: + - bsl + singular: backupstoragelocation + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: BackupStorageLocation is a location where Velero stores backup objects + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupStorageLocationSpec defines the desired state of a Velero BackupStorageLocation + properties: + accessMode: + description: AccessMode defines the permissions for the backup storage location. 
+ enum: + - ReadOnly + - ReadWrite + type: string + backupSyncPeriod: + description: BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync. + nullable: true + type: string + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + objectStorage: + description: ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage. + properties: + bucket: + description: Bucket is the bucket to use for object storage. + type: string + caCert: + description: CACert defines a CA bundle to use when verifying TLS connections to the provider. + format: byte + type: string + prefix: + description: Prefix is the path inside a bucket to use for Velero storage. Optional. + type: string + required: + - bucket + type: object + provider: + description: Provider is the provider of the backup storage. + type: string + validationFrequency: + description: ValidationFrequency defines how frequently to validate the corresponding object storage. A value of 0 disables validation. + nullable: true + type: string + required: + - objectStorage + - provider + type: object + status: + description: BackupStorageLocationStatus defines the observed state of BackupStorageLocation + properties: + accessMode: + description: "AccessMode is an unused field. \n Deprecated: there is now an AccessMode field on the Spec and this field will be removed entirely as of v2.0." + enum: + - ReadOnly + - ReadWrite + type: string + lastSyncedRevision: + description: "LastSyncedRevision is the value of the `metadata/revision` file in the backup storage location the last time the BSL's contents were synced into the cluster. \n Deprecated: this field is no longer updated or used for detecting changes to the location's contents and will be removed entirely in v2.0." + type: string + lastSyncedTime: + description: LastSyncedTime is the last time the contents of the location were synced into the cluster. + format: date-time + nullable: true + type: string + lastValidationTime: + description: LastValidationTime is the last time the backup store location was validated the cluster. + format: date-time + nullable: true + type: string + phase: + description: Phase is the current state of the BackupStorageLocation. + enum: + - Available + - Unavailable + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: deletebackuprequests.velero.io + spec: + group: velero.io + names: + kind: DeleteBackupRequest + listKind: DeleteBackupRequestList + plural: deletebackuprequests + singular: deletebackuprequest + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: DeleteBackupRequest is a request to delete one or more backups. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DeleteBackupRequestSpec is the specification for which backups to delete. + properties: + backupName: + type: string + required: + - backupName + type: object + status: + description: DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. + properties: + errors: + description: Errors contains any errors that were encountered during the deletion process. + items: + type: string + nullable: true + type: array + phase: + description: Phase is the current state of the DeleteBackupRequest. + enum: + - New + - InProgress + - Processed + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: downloadrequests.velero.io + spec: + group: velero.io + names: + kind: DownloadRequest + listKind: DownloadRequestList + plural: downloadrequests + singular: downloadrequest + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: DownloadRequest is a request to download an artifact from backup object storage, such as a backup log file. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DownloadRequestSpec is the specification for a download request. + properties: + target: + description: Target is what to download (e.g. logs for a backup). + properties: + kind: + description: Kind is the type of file to download. + enum: + - BackupLog + - BackupContents + - BackupVolumeSnapshots + - BackupResourceList + - RestoreLog + - RestoreResults + type: string + name: + description: Name is the name of the kubernetes resource with which the file is associated. + type: string + required: + - kind + - name + type: object + required: + - target + type: object + status: + description: DownloadRequestStatus is the current status of a DownloadRequest. + properties: + downloadURL: + description: DownloadURL contains the pre-signed URL for the target file. + type: string + expiration: + description: Expiration is when this DownloadRequest expires and can be deleted by the system. + format: date-time + nullable: true + type: string + phase: + description: Phase is the current state of the DownloadRequest. 
+ enum: + - New + - Processed + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: podvolumebackups.velero.io + spec: + group: velero.io + names: + kind: PodVolumeBackup + listKind: PodVolumeBackupList + plural: podvolumebackups + singular: podvolumebackup + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeBackupSpec is the specification for a PodVolumeBackup. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage location where the restic repository is stored. + type: string + node: + description: Node is the name of the node that the Pod is running on. + type: string + pod: + description: Pod is a reference to the pod containing the volume to be backed up. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + repoIdentifier: + description: RepoIdentifier is the restic repository identifier. 
+ type: string + tags: + additionalProperties: + type: string + description: Tags are a map of key-value pairs that should be applied to the volume backup as tags. + type: object + volume: + description: Volume is the name of the volume within the Pod to be backed up. + type: string + required: + - backupStorageLocation + - node + - pod + - repoIdentifier + - volume + type: object + status: + description: PodVolumeBackupStatus is the current status of a PodVolumeBackup. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume backup's status. + type: string + path: + description: Path is the full path within the controller pod being backed up. + type: string + phase: + description: Phase is the current state of the PodVolumeBackup. + enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the volume and the current number of backed up bytes. This can be used to display progress information about the backup operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + snapshotID: + description: SnapshotID is the identifier for the snapshot of the pod volume. + type: string + startTimestamp: + description: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: podvolumerestores.velero.io + spec: + group: velero.io + names: + kind: PodVolumeRestore + listKind: PodVolumeRestoreList + plural: podvolumerestores + singular: podvolumerestore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PodVolumeRestoreSpec is the specification for a PodVolumeRestore. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the backup storage location where the restic repository is stored. + type: string + pod: + description: Pod is a reference to the pod containing the volume to be restored. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + repoIdentifier: + description: RepoIdentifier is the restic repository identifier. + type: string + snapshotID: + description: SnapshotID is the ID of the volume snapshot to be restored. + type: string + volume: + description: Volume is the name of the volume within the Pod to be restored. + type: string + required: + - backupStorageLocation + - pod + - repoIdentifier + - snapshotID + - volume + type: object + status: + description: PodVolumeRestoreStatus is the current status of a PodVolumeRestore. + properties: + completionTimestamp: + description: CompletionTimestamp records the time a restore was completed. Completion time is recorded even on failed restores. The server's time is used for CompletionTimestamps + format: date-time + nullable: true + type: string + message: + description: Message is a message about the pod volume restore's status. + type: string + phase: + description: Phase is the current state of the PodVolumeRestore. + enum: + - New + - InProgress + - Completed + - Failed + type: string + progress: + description: Progress holds the total number of bytes of the snapshot and the current number of restored bytes. This can be used to display progress information about the restore operation. + properties: + bytesDone: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + type: object + startTimestamp: + description: StartTimestamp records the time a restore was started. 
The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: resticrepositories.velero.io + spec: + group: velero.io + names: + kind: ResticRepository + listKind: ResticRepositoryList + plural: resticrepositories + singular: resticrepository + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ResticRepositorySpec is the specification for a ResticRepository. + properties: + backupStorageLocation: + description: BackupStorageLocation is the name of the BackupStorageLocation that should contain this repository. + type: string + maintenanceFrequency: + description: MaintenanceFrequency is how often maintenance should be run. + type: string + resticIdentifier: + description: ResticIdentifier is the full restic-compatible string for identifying this repository. + type: string + volumeNamespace: + description: VolumeNamespace is the namespace this restic repository contains pod volume backups for. + type: string + required: + - backupStorageLocation + - maintenanceFrequency + - resticIdentifier + - volumeNamespace + type: object + status: + description: ResticRepositoryStatus is the current status of a ResticRepository. + properties: + lastMaintenanceTime: + description: LastMaintenanceTime is the last time maintenance was run. + format: date-time + nullable: true + type: string + message: + description: Message is a message about the current status of the ResticRepository. + type: string + phase: + description: Phase is the current state of the ResticRepository. + enum: + - New + - Ready + - NotReady + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: restores.velero.io + spec: + group: velero.io + names: + kind: Restore + listKind: RestoreList + plural: restores + singular: restore + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Restore is a Velero resource that represents the application of resources from a Velero backup to a target Kubernetes cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RestoreSpec defines the specification for a Velero restore. + properties: + backupName: + description: BackupName is the unique name of the Velero backup to restore from. + type: string + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the restore. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the restore. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed during or post restore. + properties: + resources: + items: + description: RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. + type: string + postHooks: + description: PostHooks is a list of RestoreResourceHooks to execute during and after restoring a resource. + items: + description: RestoreResourceHook defines a restore hook for a resource. + properties: + exec: + description: Exec defines an exec restore hook. + properties: + command: + description: Command is the command and arguments to execute from within a container after a pod has been restored. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + execTimeout: + description: ExecTimeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + waitTimeout: + description: WaitTimeout defines the maximum amount of time Velero should wait for the container to be Ready before attempting to run the command. + type: string + required: + - command + type: object + init: + description: Init defines an init restore hook. + properties: + initContainers: + description: InitContainers is list of init containers to be added to a pod during its restore. + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
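As a quick illustration of the env/valueFrom schema described above, an init-hook container could mix a literal value, a ConfigMap key, a downward-API field, and a Secret key. This is only a sketch; the ConfigMap and Secret names (app-config, backup-credentials) are hypothetical:

env:
  - name: LOG_LEVEL                # literal value; defaults to "" if omitted
    value: "debug"
  - name: CONFIG_CHECKSUM          # key read from a ConfigMap in the pod's namespace
    valueFrom:
      configMapKeyRef:
        name: app-config           # hypothetical ConfigMap
        key: checksum
        optional: true
  - name: POD_NAMESPACE            # downward-API field reference
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  - name: RESTIC_PASSWORD          # key read from a Secret
    valueFrom:
      secretKeyRef:
        name: backup-credentials   # hypothetical Secret
        key: restic-password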
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
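The probe blocks above follow the standard Kubernetes container schema that this CRD embeds. Probes are rarely meaningful on restore init containers, but for reference a liveness/readiness pair using these fields might look like the following sketch; the path, port, and thresholds are illustrative only:

livenessProbe:
  httpGet:
    path: /healthz
    port: 8080                 # may also be a named port (IANA_SVC_NAME), e.g. "http"
    scheme: HTTP
  initialDelaySeconds: 10
  periodSeconds: 10
  failureThreshold: 3
readinessProbe:
  tcpSocket:
    port: 8080
  periodSeconds: 5
  timeoutSeconds: 1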
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. 
More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. 
If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
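To make the securityContext fields above concrete, a hardened (non-root, read-only) container context could be sketched as follows; the UID/GID values are illustrative, not chart defaults:

securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
  runAsGroup: 1000
  capabilities:
    drop:
      - ALL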
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. 
+ items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the initContainers to complete. + type: string + type: object + type: object + type: array + required: + - name + type: object + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the restore. If null, defaults to true. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the restore. If empty, all resources in the backup are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when restoring individual objects from the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
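Putting the init hook schema together, a restore hook that injects an init container into matching pods before they start could look like the sketch below. The hook name, namespace, image, and volume name are hypothetical; the structure follows the hooks.resources[].postHooks[].init fields described above:

hooks:
  resources:
    - name: prepare-data               # hypothetical hook name
      includedNamespaces:
        - my-app                       # hypothetical namespace
      postHooks:
        - init:
            initContainers:
              - name: restore-prep
                image: busybox:1.35    # hypothetical image
                command: ["sh", "-c", "chown -R 1000:1000 /data"]
                volumeMounts:
                  - name: data         # must match a volume name in the restored pod
                    mountPath: /data
            timeout: 5m                # maximum time to wait for the init containers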
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaceMapping: + additionalProperties: + type: string + description: NamespaceMapping is a map of source namespace names to target namespace names to restore into. Any source namespaces not included in the map will be restored into namespaces of the same name. + type: object + restorePVs: + description: RestorePVs specifies whether to restore all included PVs from snapshot (via the cloudprovider). + nullable: true + type: boolean + scheduleName: + description: ScheduleName is the unique name of the Velero schedule to restore from. If specified, and BackupName is empty, Velero will restore from the most recent successful backup created from this schedule. + type: string + required: + - backupName + type: object + status: + description: RestoreStatus captures the current status of a Velero restore + properties: + completionTimestamp: + description: CompletionTimestamp records the time the restore operation was completed. Completion time is recorded even on failed restore. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + errors: + description: Errors is a count of all error messages that were generated during execution of the restore. The actual errors are stored in object storage. + type: integer + failureReason: + description: FailureReason is an error that caused the entire restore to fail. + type: string + phase: + description: Phase is the current state of the Restore + enum: + - New + - FailedValidation + - InProgress + - Completed + - PartiallyFailed + - Failed + type: string + startTimestamp: + description: StartTimestamp records the time the restore operation was started. The server's time is used for StartTimestamps + format: date-time + nullable: true + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable) + items: + type: string + nullable: true + type: array + warnings: + description: Warnings is a count of all warning messages that were generated during execution of the restore. The actual warnings are stored in object storage. 
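For reference, a Restore object exercising several of the spec fields defined above might look like this minimal sketch; the backup name, namespaces, and labels are placeholders, and the object would normally be created in the namespace where the backup agent is installed:

apiVersion: velero.io/v1
kind: Restore
metadata:
  name: my-app-restore                 # placeholder name
  namespace: velero                    # or the agent's install namespace
spec:
  backupName: my-app-backup-20210301   # placeholder backup name
  includedNamespaces:
    - my-app
  excludedResources:
    - events
  namespaceMapping:
    my-app: my-app-restored            # restore into a differently named namespace
  labelSelector:
    matchLabels:
      app: my-app
  restorePVs: true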
+ type: integer + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: schedules.velero.io + spec: + group: velero.io + names: + kind: Schedule + listKind: ScheduleList + plural: schedules + singular: schedule + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: Schedule is a Velero resource that represents a pre-scheduled or periodic Backup that should be run. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ScheduleSpec defines the specification for a Velero schedule + properties: + schedule: + description: Schedule is a Cron expression defining when to run the Backup. + type: string + template: + description: Template is the definition of the Backup to be run on the provided schedule + properties: + defaultVolumesToRestic: + description: DefaultVolumesToRestic specifies whether restic should be used to take a backup of all pod volumes by default. + type: boolean + excludedNamespaces: + description: ExcludedNamespaces contains a list of namespaces that are not included in the backup. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources is a slice of resource names that are not included in the backup. + items: + type: string + nullable: true + type: array + hooks: + description: Hooks represent custom behaviors that should be executed at different phases of the backup. + properties: + resources: + description: Resources are hooks that should be executed when backing up individual instances of a resource. + items: + description: BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources. 
+ items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + name: + description: Name is the name of this hook. + type: string + post: + description: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. + type: string + required: + - command + type: object + required: + - exec + type: object + type: array + pre: + description: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed. + items: + description: BackupResourceHook defines a hook for a resource. + properties: + exec: + description: Exec defines an exec hook. + properties: + command: + description: Command is the command and arguments to execute. + items: + type: string + minItems: 1 + type: array + container: + description: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used. + type: string + onError: + description: OnError specifies how Velero should behave if it encounters an error executing this hook. + enum: + - Continue + - Fail + type: string + timeout: + description: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure. 
+ type: string + required: + - command + type: object + required: + - exec + type: object + type: array + required: + - name + type: object + nullable: true + type: array + type: object + includeClusterResources: + description: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup. + nullable: true + type: boolean + includedNamespaces: + description: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + orderedResources: + additionalProperties: + type: string + description: OrderedResources specifies the backup order of resources of specific Kind. The map key is the Kind name and value is a list of resource names separeted by commas. Each resource name has format "namespace/resourcename". For cluster resources, simply use "resourcename". + nullable: true + type: object + snapshotVolumes: + description: SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup. + nullable: true + type: boolean + storageLocation: + description: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + type: string + ttl: + description: TTL is a time.Duration-parseable string describing how long the Backup should be retained for. + type: string + volumeSnapshotLocations: + description: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. 
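A Schedule that runs a nightly backup using the template fields above could be sketched as follows; all names, the cron expression, and the pre-backup command are illustrative:

apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: nightly-backup                 # placeholder name
  namespace: velero                    # or the agent's install namespace
spec:
  schedule: "0 2 * * *"                # cron expression: every day at 02:00
  template:
    includedNamespaces:
      - my-app
    ttl: 720h                          # retain backups for 30 days
    snapshotVolumes: true
    storageLocation: default           # name of a BackupStorageLocation
    hooks:
      resources:
        - name: quiesce-db             # hypothetical pre-backup hook
          includedNamespaces:
            - my-app
          pre:
            - exec:
                command: ["/bin/sh", "-c", "pg_dump -f /backup/dump.sql mydb"]
                onError: Fail
                timeout: 3m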
+ items: + type: string + type: array + type: object + required: + - schedule + - template + type: object + status: + description: ScheduleStatus captures the current state of a Velero schedule + properties: + lastBackup: + description: LastBackup is the last time a Backup was run for this Schedule schedule + format: date-time + nullable: true + type: string + phase: + description: Phase is the current phase of the Schedule + enum: + - New + - Enabled + - FailedValidation + type: string + validationErrors: + description: ValidationErrors is a slice of all validation errors (if applicable) + items: + type: string + type: array + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: serverstatusrequests.velero.io + spec: + group: velero.io + names: + kind: ServerStatusRequest + listKind: ServerStatusRequestList + plural: serverstatusrequests + shortNames: + - ssr + singular: serverstatusrequest + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ServerStatusRequest is a request to access current status information about the Velero server. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ServerStatusRequestSpec is the specification for a ServerStatusRequest. + type: object + status: + description: ServerStatusRequestStatus is the current status of a ServerStatusRequest. + properties: + phase: + description: Phase is the current lifecycle phase of the ServerStatusRequest. + enum: + - New + - Processed + type: string + plugins: + description: Plugins list information about the plugins running on the Velero server + items: + description: PluginInfo contains attributes of a Velero plugin + properties: + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + nullable: true + type: array + processedTimestamp: + description: ProcessedTimestamp is when the ServerStatusRequest was processed by the ServerStatusRequestController. + format: date-time + nullable: true + type: string + serverVersion: + description: ServerVersion is the Velero server version. 
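ServerStatusRequest objects are normally created on the user's behalf by tooling (for example the velero CLI when reporting server status) rather than written by hand, but per the schema above a minimal one is just metadata plus an empty spec:

apiVersion: velero.io/v1
kind: ServerStatusRequest
metadata:
  generateName: status-check-          # the API server appends a random suffix
  namespace: velero                    # or the agent's install namespace
spec: {}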
+ type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +{{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "volumesnapshotlocations.velero.io") }} +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + "helm.sh/resource-policy": keep + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: volumesnapshotlocations.velero.io + spec: + group: velero.io + names: + kind: VolumeSnapshotLocation + listKind: VolumeSnapshotLocationList + plural: volumesnapshotlocations + singular: volumesnapshotlocation + preserveUnknownFields: false + scope: Namespaced + validation: + openAPIV3Schema: + description: VolumeSnapshotLocation is a location where Velero stores volume snapshots. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation. + properties: + config: + additionalProperties: + type: string + description: Config is for provider-specific configuration fields. + type: object + provider: + description: Provider is the provider of the volume storage. + type: string + required: + - provider + type: object + status: + description: VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation. + properties: + phase: + description: VolumeSnapshotLocationPhase is the lifecyle phase of a Velero VolumeSnapshotLocation. 
+ enum: + - Available + - Unavailable + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +{{- end }} +- apiVersion: v1 + kind: ServiceAccount + metadata: + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: {{ .Values.namespace }} + namespace: {{ .Values.namespace }} +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + creationTimestamp: null + labels: + component: kubeagent-backup-helper + name: {{ .Values.namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: {{ .Values.namespace }} + namespace: {{ .Values.namespace }} +{{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "volumesnapshotclasses.snapshot.storage.k8s.io") }} +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + "helm.sh/resource-policy": keep + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io + spec: + additionalPrinterColumns: + - JSONPath: .driver + name: Driver + type: string + - JSONPath: .deletionPolicy + description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass + should be deleted when its bound VolumeSnapshot is deleted. + name: DeletionPolicy + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + preserveUnknownFields: false + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} +{{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "volumesnapshotcontents.snapshot.storage.k8s.io") }} +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + "helm.sh/resource-policy": keep + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io + spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot in bytes + name: RestoreSize + type: integer + - JSONPath: .spec.deletionPolicy + description: Determines whether this VolumeSnapshotContent and its physical snapshot + on the underlying storage system should be deleted when its bound VolumeSnapshot + is deleted. + name: DeletionPolicy + type: string + - JSONPath: .spec.driver + description: Name of the CSI driver used to create the physical snapshot on the + underlying storage system. + name: Driver + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: Name of the VolumeSnapshotClass to which this snapshot belongs. + name: VolumeSnapshotClass + type: string + - JSONPath: .spec.volumeSnapshotRef.name + description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + name: VolumeSnapshot + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. 
+ properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken from. This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. 
+ type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +{{- end }} +{{- if not (lookup "apiextensions.k8s.io/v1" "CustomResourceDefinition" "" "volumesnapshots.snapshot.storage.k8s.io") }} +- apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139" + "helm.sh/resource-policy": keep + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io + spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .spec.source.persistentVolumeClaimName + description: Name of the source PVC from where a dynamically taken snapshot will + be created. + name: SourcePVC + type: string + - JSONPath: .spec.source.volumeSnapshotContentName + description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + name: SourceSnapshotContent + type: string + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot. + name: RestoreSize + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + name: SnapshotClass + type: string + - JSONPath: .status.boundVolumeSnapshotContentName + description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + name: SnapshotContent + type: string + - JSONPath: .status.creationTime + description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. + name: CreationTime + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. 
+ properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. 
For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + status: + # acceptedNames: + # kind: "" + # plural: "" + conditions: [] + # storedVersions: [] +{{- end }} +- apiVersion: apps/v1 + kind: Deployment + metadata: + name: kubeagent + namespace: {{ .Values.namespace }} + spec: + selector: + matchLabels: + app: kubeagent + strategy: {} + replicas: 1 + template: + metadata: + labels: + app: kubeagent + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "8085" + prometheus.io/scrape: "true" + spec: + containers: + - image: "{{ .Values.kagent_image.repository }}:{{ .Values.kagent_image.tag }}" + args: ["/usr/local/bin/kubeagent", "--server_addr", "agent.cloudcasa.io:443", "--tls", "true"] + name: kubeagent + resources: + requests: + memory: "{{ .Values.request_kubeagent_memory }}" + cpu: "{{ .Values.request_kubeagent_cpu }}" + limits: + memory: "{{ .Values.limit_kubeagent_memory }}" + cpu: "{{ .Values.limit_kubeagent_cpu }}" + env: + - name: AMDS_CLUSTER_ID + value: "{{ .Values.cluster_id }}" + - name: KUBEMOVER_IMAGE + value: "{{ .Values.kagent_image.repository }}:{{ .Values.kagent_image.tag }}" + volumeMounts: + - mountPath: /credentials + name: cloud-credentials + - mountPath: /plugins + name: plugins + - mountPath: /scratch + name: scratch + - image: "{{ .Values.velero_image.repository }}:{{ .Values.velero_image.tag }}" + imagePullPolicy: IfNotPresent + name: kubeagent-backup-helper + args: + - server + - --features=EnableCSI,EnableAPIGroupVersions + - --backup-sync-period=0s + - --store-validation-frequency=0s + # (24 hours * 365 days * 10 years) + (2 * 24 hours[for leap years]) = 87648 hours + - --default-backup-ttl=87648h0m0s + - --disable-controllers=backup-sync,schedule,gc,download-request,restic-repo,server-status-request + - --log-format=json + - --log-level=info + command: + - /velero + env: + - name: VELERO_SCRATCH_DIR + value: /scratch + - name: VELERO_NAMESPACE + value: {{ .Values.namespace }} + - name: LD_LIBRARY_PATH + value: /plugins + - name: AWS_SHARED_CREDENTIALS_FILE + value: /credentials/s3/cloud + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /credentials/gcp/cloud + - name: AZURE_CREDENTIALS_FILE + value: /credentials/azure/cloud + ports: + - containerPort: 8085 + name: metrics + resources: + limits: + cpu: "{{ .Values.limit_velero_cpu }}" + memory: "{{ .Values.limit_velero_memory }}" + 
requests: + cpu: "{{ .Values.request_velero_cpu }}" + memory: "{{ .Values.request_velero_memory }}" + volumeMounts: + - mountPath: /plugins + name: plugins + - mountPath: /scratch + name: scratch + - mountPath: /credentials + name: cloud-credentials + initContainers: + - image: "{{ .Values.velero_aws_plugin_image.repository }}:{{ .Values.velero_aws_plugin_image.tag }}" + imagePullPolicy: IfNotPresent + name: velero-plugin-for-aws + resources: {} + volumeMounts: + - mountPath: /target + name: plugins + - image: "{{ .Values.velero_csi_plugin_image.repository }}:{{ .Values.velero_csi_plugin_image.tag }}" + imagePullPolicy: IfNotPresent + name: velero-plugin-for-csi + resources: {} + volumeMounts: + - mountPath: /target + name: plugins + restartPolicy: Always + serviceAccountName: {{ .Values.namespace }} + volumes: + - emptyDir: {} + name: plugins + - emptyDir: {} + name: scratch + - emptyDir: {} + name: cloud-credentials \ No newline at end of file diff --git a/charts/cloudcasa/cloudcasa/1/values.yaml b/charts/cloudcasa/cloudcasa/1/values.yaml new file mode 100644 index 000000000..e51d6a8d3 --- /dev/null +++ b/charts/cloudcasa/cloudcasa/1/values.yaml @@ -0,0 +1,92 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry +## + +## Required namespace +namespace: cloudcasa-io + +## Velero csi plugin image +## ref: https://hub.docker.com/r/velero/velero-plugin-for-csi/tags/ +velero_csi_plugin_image: + registry: docker.io + repository: catalogicsoftware/velero-plugin-for-csi + tag: v0.1.2.3 + pullPolicy: IfNotPresent + debug: false +## + +## Velero aws plugin image +## ref: https://hub.docker.com/r/velero/velero-plugin-for-aws/tags/ +velero_aws_plugin_image: + registry: docker.io + repository: velero/velero-plugin-for-aws + tag: v1.1.0 + pullPolicy: IfNotPresent + debug: false +## + +## Velero image +## ref: https://hub.docker.com/r/velero/velero/tags +velero_image: + registry: docker.io + repository: velero/velero + tag: v1.5.3 + pullPolicy: IfNotPresent + debug: false +## + +## Cloudcasa kubeagent image +## ref: https://hub.docker.com/r/catalogicsoftware/amds-kagent/tags/ +kagent_image: + registry: docker.io + repository: catalogicsoftware/amds-kagent + tag: 0.1.0-prod.47 + pullPolicy: IfNotPresent + debug: false +## + +## Cloudcasa AMDS Cluster ID. 
To be provided by the user +cluster_id: "" + +## Resources to be used by velero and kubeagent pods +request_velero_memory: 128Mi +request_velero_cpu: 500m +limit_velero_memory: 512Mi +limit_velero_cpu: 1 +request_kubeagent_memory: 32Mi +request_kubeagent_cpu: 250m +limit_kubeagent_memory: 64Mi +limit_kubeagent_cpu: 500m + +## Placeholder configuration below this line ## +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + create: true + annotations: {} + name: "" + +podAnnotations: {} + +podSecurityContext: {} + +securityContext: {} + +resources: {} + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 1 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} +## diff --git a/charts/cockroachdb/cockroachdb/4.1.200/Chart.yaml b/charts/cockroachdb/cockroachdb/4.1.200/Chart.yaml new file mode 100644 index 000000000..b8ce2f514 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/Chart.yaml @@ -0,0 +1,15 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: cockroachdb +apiVersion: v1 +appVersion: 20.1.3 +description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. +home: https://www.cockroachlabs.com +icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png +maintainers: +- email: helm-charts@cockroachlabs.com + name: cockroachlabs +name: cockroachdb +sources: +- https://github.com/cockroachdb/cockroach +version: 4.1.200 diff --git a/charts/cockroachdb/cockroachdb/4.1.200/README.md b/charts/cockroachdb/cockroachdb/4.1.200/README.md new file mode 100644 index 000000000..cbb90aa27 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/README.md @@ -0,0 +1,477 @@ +# CockroachDB Helm Chart + +[CockroachDB](https://github.com/cockroachdb/cockroach) - the open source, cloud-native distributed SQL database. + +## Documentation + +Below is a brief overview of operating the CockroachDB Helm Chart and some specific implementation details. For additional information on deploying CockroachDB, please see: +> + +Note that the documentation requires Helm 3.0 or higher. + +## Prerequisites Details + +* Kubernetes 1.8 +* PV support on the underlying infrastructure (only if using `storage.persistentVolume`). [Docker for windows hostpath provisioner is not supported](https://github.com/cockroachdb/docs/issues/3184). +* If you want to secure your cluster to use TLS certificates for all network communication, [Helm must be installed with RBAC privileges](https://helm.sh/docs/topics/rbac/) or else you will get an "attempt to grant extra privileges" error. + +## StatefulSet Details + +* + +## StatefulSet Caveats + +* + +## Chart Details + +This chart will do the following: + +* Set up a dynamically scalable CockroachDB cluster using a Kubernetes StatefulSet. + +## Add the CockroachDB Repository + +```shell +helm repo add cockroachdb https://charts.cockroachdb.com/ +``` + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```shell +helm install my-release cockroachdb/cockroachdb +``` + +Note that for a production cluster, you will likely want to override the following parameters in [`values.yaml`](values.yaml) with your own values. + +- `statefulset.resources.requests.memory` and `statefulset.resources.limits.memory` allocate memory resources to CockroachDB pods in your cluster. 
+- `conf.cache` and `conf.max-sql-memory` are memory limits that we recommend setting to 1/4 of the above resource allocation. When running CockroachDB, you must set these limits explicitly to avoid running out of memory. +- `storage.persistentVolume.size` defaults to `100Gi` of disk space per pod, which you may increase or decrease for your use case. +- `storage.persistentVolume.storageClass` uses the default storage class for your environment. We strongly recommend that you specify a storage class which uses an SSD. +- `tls.enabled` must be set to `yes`/`true` to deploy in secure mode. + +For more information on overriding the `values.yaml` parameters, please see: +> + +If you are running in secure mode (with configuration parameter `tls.enabled` set to `yes`/`true`) and `tls.certs.provided` set to `no`/`false`), then you will have to manually approve the cluster's security certificates as the pods are created. You can see the pending CSRs (certificate signing requests) by running `kubectl get csr`, and approve them by running `kubectl certificate approve `. You'll have to approve one certificate for each CockroachDB node (e.g., `default.node.my-release-cockroachdb-0` and one client certificate for the job that initializes the cluster (e.g., `default.node.root`). + +When `tls.certs.provided` is set to `yes`/`true`, this chart will use certificates created outside of Kubernetes. You may want to use this if you want to use a different certificate authority from the one being used by Kubernetes or if your Kubernetes cluster doesn't fully support certificate-signing requests. To use this, first set up your certificates and load them into your Kubernetes cluster as Secrets using the commands below: + +``` +mkdir certs +mkdir my-safe-directory +cockroach cert create-ca --certs-dir=certs --ca-key=my-safe-directory/ca.key +cockroach cert create-client root --certs-dir=certs --ca-key=my-safe-directory/ca.key +kubectl create secret generic cockroachdb-root --from-file=certs +cockroach cert create-node --certs-dir=certs --ca-key=my-safe-directory/ca.key localhost 127.0.0.1 eerie-horse-cockroachdb-public eerie-horse-cockroachdb-public.default eerie-horse-cockroachdb-public.default.svc.cluster.local *.eerie-horse-cockroachdb *.eerie-horse-cockroachdb.default *.eerie-horse-cockroachdb.default.svc.cluster.local +kubectl create secret generic cockroachdb-node --from-file=certs +``` + +Set `tls.certs.tlsSecret` to `yes/true` if you make use of [cert-manager][3] in your cluster. + +[cert-manager][3] stores generated certificates in dedicated TLS secrets. Thus, they are always named: + +* `ca.crt` +* `tls.crt` +* `tls.key` + +On the other hand, CockroachDB also demands dedicated certificate filenames: + +* `ca.crt` +* `node.crt` +* `node.key` +* `client.root.crt` +* `client.root.key` + +By activating `tls.certs.tlsSecret` we benefit from projected secrets and convert the TLS secret filenames to their according CockroachDB filenames. + +If you are running in secure mode, then you will have to manually approve the cluster's security certificates as the pods are created. You can see the pending CSRs (certificate signing requests) by running `kubectl get csr`, and approve them by running `kubectl certificate approve `. You'll have to approve one certificate for each CockroachDB node (e.g., `default.node.my-release-cockroachdb-0` and one client certificate for the job that initializes the cluster (e.g., `default.node.root`). 
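+As a rough sketch of that approval flow, using the example CSR names quoted above (the exact names depend on your release name and namespace), the commands look like this:
+
+```shell
+# List pending certificate signing requests created as the CockroachDB pods start
+kubectl get csr
+
+# Approve the node certificate for each pod, plus the client certificate for the init job
+kubectl certificate approve default.node.my-release-cockroachdb-0
+kubectl certificate approve default.node.root
+```
+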
+ +Confirm that all pods are `Running` successfully and init has been completed: + +```shell +kubectl get pods +``` + +``` +NAME READY STATUS RESTARTS AGE +my-release-cockroachdb-0 1/1 Running 0 1m +my-release-cockroachdb-1 1/1 Running 0 1m +my-release-cockroachdb-2 1/1 Running 0 1m +my-release-cockroachdb-init-k6jcr 0/1 Completed 0 1m +``` + +Confirm that persistent volumes are created and claimed for each pod: + +```shell +kubectl get pv +``` + +``` +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-64878ebf-f3f0-11e8-ab5b-42010a8e0035 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 51s +pvc-64945b4f-f3f0-11e8-ab5b-42010a8e0035 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 51s +pvc-649d920d-f3f0-11e8-ab5b-42010a8e0035 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 51s +``` + +## Upgrading the cluster + +### Chart version 3.0.0 and after + +Launch a temporary interactive pod and start the built-in SQL client: + +```shell +kubectl run cockroachdb --rm -it \ +--image=cockroachdb/cockroach \ +--restart=Never \ +-- sql --insecure --host=my-release-cockroachdb-public +``` + +> If you are running in secure mode, you will have to provide a client certificate to the cluster in order to authenticate, so the above command will not work. See [here](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/client-secure.yaml) for an example of how to set up an interactive SQL shell against a secure cluster or [here](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app-secure.yaml) for an example application connecting to a secure cluster. + +Set `cluster.preserve_downgrade_option`, where `$current_version` is the CockroachDB version currently running (e.g., `19.2`): + +```sql +> SET CLUSTER SETTING cluster.preserve_downgrade_option = '$current_version'; +``` + +Exit the shell and delete the temporary pod: + +```sql +> \q +``` + +Kick off the upgrade process by changing the new Docker image, where `$new_version` is the CockroachDB version to which you are upgrading: + +```shell +kubectl delete job my-release-cockroachdb-init +``` + +```shell +helm upgrade my-release cockroachdb/cockroachdb \ +--set image.tag=$new_version \ +--reuse-values +``` + +Kubernetes will carry out a safe [rolling upgrade](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) of your CockroachDB nodes one-by-one. Monitor the cluster's pods until all have been successfully restarted: + +```shell +kubectl get pods +``` + +``` +NAME READY STATUS RESTARTS AGE +my-release-cockroachdb-0 1/1 Running 0 2m +my-release-cockroachdb-1 1/1 Running 0 3m +my-release-cockroachdb-2 1/1 Running 0 3m +my-release-cockroachdb-3 0/1 ContainerCreating 0 25s +my-release-cockroachdb-init-nwjkh 0/1 ContainerCreating 0 6s +``` + +```shell +kubectl get pods \ +-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}' +``` + +``` +my-release-cockroachdb-0 cockroachdb/cockroach:v20.1.3 +my-release-cockroachdb-1 cockroachdb/cockroach:v20.1.3 +my-release-cockroachdb-2 cockroachdb/cockroach:v20.1.3 +my-release-cockroachdb-3 cockroachdb/cockroach:v20.1.3 +``` + +Resume normal operations. 
Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade: + +```shell +kubectl run cockroachdb --rm -it \ +--image=cockroachdb/cockroach \ +--restart=Never \ +-- sql --insecure --host=my-release-cockroachdb-public +``` + +```sql +> RESET CLUSTER SETTING cluster.preserve_downgrade_option; +> \q +``` + +### Chart versions prior to 3.0.0 + +Due to a change in the label format in version 3.0.0 of this chart, upgrading requires that you delete the StatefulSet. Luckily there is a way to do it without actually deleting all the resources managed by the StatefulSet. Use the workaround below to upgrade from charts versions previous to 3.0.0: + +Get the new labels from the specs rendered by Helm: + +```shell +helm template -f deploy.vals.yml cockroachdb/cockroachdb -x templates/statefulset.yaml \ +| yq r - spec.template.metadata.labels +``` + +``` +app.kubernetes.io/name: cockroachdb +app.kubernetes.io/instance: my-release +app.kubernetes.io/component: cockroachdb +``` + +Place the new labels on all pods of the StatefulSet (change `my-release-cockroachdb-0` to the name of each pod): + +```shell +kubectl label pods my-release-cockroachdb-0 \ +app.kubernetes.io/name=cockroachdb \ +app.kubernetes.io/instance=my-release \ +app.kubernetes.io/component=cockroachdb +``` + +Delete the StatefulSet without deleting pods: + +```shell +kubectl delete statefulset my-release-cockroachdb --cascade=false +``` + +Verify that no pod is deleted and then upgrade as normal. A new StatefulSet will be created, taking over the management of the existing pods and upgrading them if needed. + +### See also + +For more information about upgrading a cluster to the latest major release of CockroachDB, see [Upgrade to CockroachDB v20.1](https://www.cockroachlabs.com/docs/stable/upgrade-cockroach-version.html). + +Note that there are some backward-incompatible changes to SQL features between versions 19.2 and 20.1. For details, see the [CockroachDB v20.1.0 release notes](https://www.cockroachlabs.com/docs/releases/v20.1.0.html#backward-incompatible-changes). + +## Configuration + +The following table lists the configurable parameters of the CockroachDB chart and their default values. +For details see the [`values.yaml`](values.yaml) file. 
+ +| Parameter | Description | Default | +| --------- | ----------- | ------- | +| `clusterDomain` | Cluster's default DNS domain | `cluster.local` | +| `conf.attrs` | CockroachDB node attributes | `[]` | +| `conf.cache` | Size of CockroachDB's in-memory cache | `25%` | +| `conf.cluster-name` | Name of CockroachDB cluster | `""` | +| `conf.disable-cluster-name-verification` | Disable CockroachDB cluster name verification | `no` | +| `conf.join` | List of already-existing CockroachDB instances | `[]` | +| `conf.max-disk-temp-storage` | Max storage capacity for temp data | `0` | +| `conf.max-offset` | Max allowed clock offset for CockroachDB cluster | `500ms` | +| `conf.max-sql-memory` | Max memory to use processing SQL querie | `25%` | +| `conf.locality` | Locality attribute for this deployment | `""` | +| `conf.single-node` | Disable CockroachDB clustering (standalone mode) | `no` | +| `conf.sql-audit-dir` | Directory for SQL audit log | `""` | +| `conf.port` | CockroachDB primary serving port in Pods | `26257` | +| `conf.http-port` | CockroachDB HTTP port in Pods | `8080` | +| `image.repository` | Container image name | `cockroachdb/cockroach` | +| `image.tag` | Container image tag | `v20.1.3` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` | +| `statefulset.replicas` | StatefulSet replicas number | `3` | +| `statefulset.updateStrategy` | Update strategy for StatefulSet Pods | `{"type": "RollingUpdate"}` | +| `statefulset.podManagementPolicy` | `OrderedReady`/`Parallel` Pods creation/deletion order | `Parallel` | +| `statefulset.budget.maxUnavailable` | k8s PodDisruptionBudget parameter | `1` | +| `statefulset.args` | Extra command-line arguments | `[]` | +| `statefulset.env` | Extra env vars | `[]` | +| `statefulset.secretMounts` | Additional Secrets to mount at cluster members | `[]` | +| `statefulset.labels` | Additional labels of StatefulSet and its Pods | `{"app.kubernetes.io/component": "cockroachdb"}` | +| `statefulset.annotations` | Additional annotations of StatefulSet Pods | `{}` | +| `statefulset.nodeAffinity` | [Node affinity rules][2] of StatefulSet Pods | `{}` | +| `statefulset.podAffinity` | [Inter-Pod affinity rules][1] of StatefulSet Pods | `{}` | +| `statefulset.podAntiAffinity` | [Anti-affinity rules][1] of StatefulSet Pods | auto | +| `statefulset.podAntiAffinity.type` | Type of auto [anti-affinity rules][1] | `soft` | +| `statefulset.podAntiAffinity.weight` | Weight for `soft` auto [anti-affinity rules][1] | `100` | +| `statefulset.nodeSelector` | Node labels for StatefulSet Pods assignment | `{}` | +| `statefulset.priorityClassName` | [PriorityClassName][4] for StatefulSet Pods | `""` | +| `statefulset.tolerations` | Node taints to tolerate by StatefulSet Pods | `[]` | +| `statefulset.resources` | Resource requests and limits for StatefulSet Pods | `{}` | +| `service.ports.grpc.external.port` | CockroachDB primary serving port in Services | `26257` | +| `service.ports.grpc.external.name` | CockroachDB primary serving port name in Services | `grpc` | +| `service.ports.grpc.internal.port` | CockroachDB inter-communication port in Services | `26257` | +| `service.ports.grpc.internal.name` | CockroachDB inter-communication port name in Services | `grpc-internal` | +| `service.ports.http.port` | CockroachDB HTTP port in Services | `8080` | +| `service.ports.http.name` | CockroachDB HTTP port name in Services | `http` | +| `service.public.type` | Public 
Service type | `ClusterIP` | +| `service.public.labels` | Additional labels of public Service | `{"app.kubernetes.io/component": "cockroachdb"}` | +| `service.public.annotations` | Additional annotations of public Service | `{}` | +| `service.discovery.labels` | Additional labels of discovery Service | `{"app.kubernetes.io/component": "cockroachdb"}` | +| `service.discovery.annotations` | Additional annotations of discovery Service | `{}` | +| `storage.hostPath` | Absolute path on host to store data | `""` | +| `storage.persistentVolume.enabled` | Whether to use PersistentVolume to store data | `yes` | +| `storage.persistentVolume.size` | PersistentVolume size | `100Gi` | +| `storage.persistentVolume.storageClass` | PersistentVolume class | `""` | +| `storage.persistentVolume.labels` | Additional labels of PersistentVolumeClaim | `{}` | +| `storage.persistentVolume.annotations` | Additional annotations of PersistentVolumeClaim | `{}` | +| `init.labels` | Additional labels of init Job and its Pod | `{"app.kubernetes.io/component": "init"}` | +| `init.annotations` | Additional labels of the Pod of init Job | `{}` | +| `init.affinity` | [Affinity rules][2] of init Job Pod | `{}` | +| `init.nodeSelector` | Node labels for init Job Pod assignment | `{}` | +| `init.tolerations` | Node taints to tolerate by init Job Pod | `[]` | +| `init.resources` | Resource requests and limits for the Pod of init Job | `{}` | +| `tls.enabled` | Whether to run securely using TLS certificates | `no` | +| `tls.serviceAccount.create` | Whether to create a new RBAC service account | `yes` | +| `tls.serviceAccount.name` | Name of RBAC service account to use | `""` | +| `tls.certs.provided` | Bring your own certs scenario, i.e certificates are provided | `no` | +| `tls.certs.clientRootSecret` | If certs are provided, secret name for client root cert | `cockroachdb-root` | +| `tls.certs.nodeSecret` | If certs are provided, secret name for node cert | `cockroachdb-node` | +| `tls.certs.tlsSecret` | Own certs are stored in TLS secret | `no` | +| `tls.init.image.repository` | Image to use for requesting TLS certificates | `cockroachdb/cockroach-k8s-request-cert` | +| `tls.init.image.tag` | Image tag to use for requesting TLS certificates | `0.4` | +| `tls.init.image.pullPolicy` | Requesting TLS certificates container pull policy | `IfNotPresent` | +| `tls.init.image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` | +| `networkPolicy.enabled` | Enable NetworkPolicy for CockroachDB's Pods | `no` | +| `networkPolicy.ingress.grpc` | Whitelist resources to access gRPC port of CockroachDB's Pods | `[]` | +| `networkPolicy.ingress.http` | Whitelist resources to access gRPC port of CockroachDB's Pods | `[]` | + +Override the default parameters using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies custom values for the parameters can be provided while installing the chart. For example: + +```shell +helm install my-release -f my-values.yaml cockroachdb/cockroachdb +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Deep dive + +### Connecting to the CockroachDB cluster + +Once you've created the cluster, you can start talking to it by connecting to its `-public` Service. CockroachDB is PostgreSQL wire protocol compatible, so there's a [wide variety of supported clients](https://www.cockroachlabs.com/docs/install-client-drivers.html). 
As an example, we'll open up a SQL shell using CockroachDB's built-in shell and play around with it a bit, like this (likely needing to replace `my-release-cockroachdb-public` with the name of the `-public` Service that was created with your installed chart): + +```shell +kubectl run cockroach-client --rm -it \ +--image=cockroachdb/cockroach \ +--restart=Never \ +-- sql --insecure --host my-release-cockroachdb-public +``` + +``` +Waiting for pod default/cockroach-client to be running, status is Pending, +pod ready: false +If you don't see a command prompt, try pressing enter. +root@my-release-cockroachdb-public:26257> SHOW DATABASES; ++--------------------+ +| Database | ++--------------------+ +| information_schema | +| pg_catalog | +| system | ++--------------------+ +(3 rows) +root@my-release-cockroachdb-public:26257> CREATE DATABASE bank; +CREATE DATABASE +root@my-release-cockroachdb-public:26257> CREATE TABLE bank.accounts (id INT +PRIMARY KEY, balance DECIMAL); +CREATE TABLE +root@my-release-cockroachdb-public:26257> INSERT INTO bank.accounts VALUES +(1234, 10000.50); +INSERT 1 +root@my-release-cockroachdb-public:26257> SELECT * FROM bank.accounts; ++------+---------+ +| id | balance | ++------+---------+ +| 1234 | 10000.5 | ++------+---------+ +(1 row) +root@my-release-cockroachdb-public:26257> \q +Waiting for pod default/cockroach-client to terminate, status is Running +pod "cockroach-client" deleted +``` + +> If you are running in secure mode, you will have to provide a client certificate to the cluster in order to authenticate, so the above command will not work. See [here](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/client-secure.yaml) for an example of how to set up an interactive SQL shell against a secure cluster or [here](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app-secure.yaml) for an example application connecting to a secure cluster. + +### Cluster health + +Because our pod spec includes regular health checks of the CockroachDB processes, simply running `kubectl get pods` and looking at the `STATUS` column is sufficient to determine the health of each instance in the cluster. + +If you want more detailed information about the cluster, the best place to look is the Admin UI. + +### Accessing the Admin UI + +If you want to see information about how the cluster is doing, you can try pulling up the CockroachDB Admin UI by port-forwarding from your local machine to one of the pods (replacing `my-release-cockroachdb-0` with the name of one of your pods: + +```shell +kubectl port-forward my-release-cockroachdb-0 8080 +``` + +You should then be able to access the Admin UI by visiting in your web browser. + +### Failover + +If any CockroachDB member fails, it is restarted or recreated automatically by the Kubernetes infrastructure, and will re-join the cluster automatically when it comes back up. 
You can test this scenario by killing any of the CockroachDB pods: + +```shell +kubectl delete pod my-release-cockroachdb-1 +``` + +```shell +kubectl get pods -l "app.kubernetes.io/instance=my-release,app.kubernetes.io/component=cockroachdb" +``` + +``` +NAME READY STATUS RESTARTS AGE +my-release-cockroachdb-0 1/1 Running 0 5m +my-release-cockroachdb-2 1/1 Running 0 5m +``` + +After a while: + +```shell +kubectl get pods -l "app.kubernetes.io/instance=my-release,app.kubernetes.io/component=cockroachdb" +``` + +``` +NAME READY STATUS RESTARTS AGE +my-release-cockroachdb-0 1/1 Running 0 5m +my-release-cockroachdb-1 1/1 Running 0 20s +my-release-cockroachdb-2 1/1 Running 0 5m +``` + +You can check the state of re-joining from the new pod's logs: + +```shell +kubectl logs my-release-cockroachdb-1 +``` + +``` +[...] +I161028 19:32:09.754026 1 server/node.go:586 [n1] node connected via gossip and +verified as part of cluster {"35ecbc27-3f67-4e7d-9b8f-27c31aae17d6"} +[...] +cockroachdb-0.my-release-cockroachdb.default.svc.cluster.local:26257 +build: beta-20161027-55-gd2d3c7f @ 2016/10/28 19:27:25 (go1.7.3) +admin: http://0.0.0.0:8080 +sql: +postgresql://root@my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local:26257?sslmode=disable +logs: cockroach-data/logs +store[0]: path=cockroach-data +status: restarted pre-existing node +clusterID: {35ecbc27-3f67-4e7d-9b8f-27c31aae17d6} +nodeID: 2 +[...] +``` + +### NetworkPolicy + +To enable NetworkPolicy for CockroachDB, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `yes`/`true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the `DefaultDeny` Namespace annotation. Note: this will enforce policy for _all_ pods in the Namespace: + +```shell +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +For more precise policy, set `networkPolicy.ingress.grpc` and `networkPolicy.ingress.http` rules. This will only allow pods that match the provided rules to connect to CockroachDB. + +### Scaling + +Scaling should be managed via the `helm upgrade` command. After resizing your cluster on your cloud environment (e.g., GKE or EKS), run the following command to add a pod. This assumes you scaled from 3 to 4 nodes: + +```shell +helm upgrade \ +my-release \ +cockroachdb/cockroachdb \ +--set statefulset.replicas=4 \ +--reuse-values +``` + +Note, that if you are running in secure mode (`tls.enabled` is `yes`/`true`) and increase the size of your cluster, you will also have to approve the CSR (certificate-signing request) of each new node (using `kubectl get csr` and `kubectl certificate approve`). + +[1]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity +[2]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity +[3]: https://cert-manager.io/ +[4]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass diff --git a/charts/cockroachdb/cockroachdb/4.1.200/app-readme.md b/charts/cockroachdb/cockroachdb/4.1.200/app-readme.md new file mode 100644 index 000000000..8fcc1fd6f --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/app-readme.md @@ -0,0 +1,9 @@ +# CockroachDB Chart + +CockroachDB is a Distributed SQL database that runs natively in Kubernetes. 
It gives you resilient, horizontal scale across multiple clouds with always-on availability and data partitioned by location. + +CockroachDB scales horizontally without reconfiguration or need for a massive architectural overhaul. Simply add a new node to the cluster and CockroachDB takes care of the underlying complexity. + + - Scale by simply adding new nodes to a CockroachDB cluster + - Automate balancing and distribution of ranges, not shards + - Optimize server utilization evenly across all nodes diff --git a/charts/cockroachdb/cockroachdb/4.1.200/questions.yml b/charts/cockroachdb/cockroachdb/4.1.200/questions.yml new file mode 100644 index 000000000..729c1fd58 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/questions.yml @@ -0,0 +1,8 @@ +questions: +- default: 100Gi + variable: Storage + description: "Size of volume for each CockroachDB Node/Pod" + group: Config + label: "Storage per Node/Pod" + required: true + type: string diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/NOTES.txt b/charts/cockroachdb/cockroachdb/4.1.200/templates/NOTES.txt new file mode 100644 index 000000000..797d5292d --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/NOTES.txt @@ -0,0 +1,50 @@ +CockroachDB can be accessed via port {{ .Values.service.ports.grpc.external.port }} at the +following DNS name from within your cluster: + +{{ template "cockroachdb.fullname" . }}-public.{{ .Release.Namespace }}.svc.cluster.local + +Because CockroachDB supports the PostgreSQL wire protocol, you can connect to +the cluster using any available PostgreSQL client. + +{{- if not .Values.tls.enabled }} + +For example, you can open up a SQL shell to the cluster by running: + + kubectl run -it --rm cockroach-client \ + --image=cockroachdb/cockroach \ + --restart=Never \ + {{- if .Values.networkPolicy.enabled }} + --labels="{{ template "cockroachdb.fullname" . }}-client=true" \ + {{- end }} + --command -- \ + ./cockroach sql --insecure --host={{ template "cockroachdb.fullname" . }}-public.{{ .Release.Namespace }} + +From there, you can interact with the SQL shell as you would any other SQL +shell, confident that any data you write will be safe and available even if +parts of your cluster fail. +{{- else }} + +Note that because the cluster is running in secure mode, any client application +that you attempt to connect will either need to have a valid client certificate +or a valid username and password. +{{- end }} + +{{- if and (.Values.networkPolicy.enabled) (not (empty .Values.networkPolicy.ingress.grpc)) }} + +Note: Since NetworkPolicy is enabled, the only Pods allowed to connect to this +CockroachDB cluster are: + +1. Having the label: "{{ template "cockroachdb.fullname" . }}-client=true" + +2. Matching the following rules: {{- toYaml .Values.networkPolicy.ingress.grpc | nindent 0 }} +{{- end }} + +Finally, to open up the CockroachDB admin UI, you can port-forward from your +local machine into one of the instances in the cluster: + + kubectl port-forward {{ template "cockroachdb.fullname" . }}-0 {{ index .Values.conf `http-port` | int64 }} + +Then you can access the admin UI at http{{ if .Values.tls.enabled }}s{{ end }}://localhost:{{ index .Values.conf `http-port` | int64 }}/ in your web browser. 
+ +For more information on using CockroachDB, please see the project's docs at: +https://www.cockroachlabs.com/docs/ diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/_helpers.tpl b/charts/cockroachdb/cockroachdb/4.1.200/templates/_helpers.tpl new file mode 100644 index 000000000..5de031357 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/_helpers.tpl @@ -0,0 +1,64 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "cockroachdb.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 56 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cockroachdb.fullname" -}} +{{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 56 | trimSuffix "-" -}} +{{- else -}} + {{- $name := default .Chart.Name .Values.nameOverride -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 56 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 56 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cockroachdb.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 56 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the ServiceAccount to use. +*/}} +{{- define "cockroachdb.tls.serviceAccount.name" -}} +{{- if .Values.tls.serviceAccount.create -}} + {{- default (include "cockroachdb.fullname" .) .Values.tls.serviceAccount.name -}} +{{- else -}} + {{- default "default" .Values.tls.serviceAccount.name -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for NetworkPolicy. +*/}} +{{- define "cockroachdb.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <=1.7-0" .Capabilities.KubeVersion.GitVersion -}} + {{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} + {{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for StatefulSets +*/}} +{{- define "cockroachdb.statefulset.apiVersion" -}} +{{- if semverCompare "<1.12-0" .Capabilities.KubeVersion.GitVersion -}} + {{- print "apps/v1beta1" -}} +{{- else -}} + {{- print "apps/v1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrole.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrole.yaml new file mode 100644 index 000000000..77f1ffea1 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.tls.enabled (not .Values.tls.certs.provided) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["create", "get", "watch"] +{{- end }} \ No newline at end of file diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrolebinding.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..3222317ff --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.tls.enabled (not .Values.tls.certs.provided) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cockroachdb.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cockroachdb.tls.serviceAccount.name" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/ingress.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/ingress.yaml new file mode 100644 index 000000000..a4e7389c9 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/ingress.yaml @@ -0,0 +1,52 @@ +{{- if .Values.ingress.enabled -}} +{{- $paths := .Values.ingress.paths -}} +{{- $ports := .Values.service.ports -}} +{{- $fullName := include "cockroachdb.fullname" . -}} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: +{{- if .Values.ingress.annotations }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +{{- end }} + name: {{ $fullName }}-ingress + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . 
}} + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} +{{- if .Values.ingress.labels }} +{{- toYaml .Values.ingress.labels | nindent 4 }} +{{- end }} +spec: + rules: + {{- if .Values.ingress.hosts }} + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + {{- range $path := $paths }} + - path: {{ $path }} + backend: + serviceName: {{ $fullName }}-public + servicePort: {{ $ports.http.name | quote }} + {{- end -}} + {{- end -}} + {{- else }} + - http: + paths: + {{- range $path := $paths }} + - path: {{ $path }} + backend: + serviceName: {{ $fullName }}-public + servicePort: {{ $ports.http.name | quote }} + {{- end -}} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{- toYaml .Values.ingress.tls | nindent 4 }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/job.init.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/job.init.yaml new file mode 100644 index 000000000..e0901f88d --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/job.init.yaml @@ -0,0 +1,157 @@ +{{- if not (index .Values.conf `single-node`) }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ template "cockroachdb.fullname" . }}-init + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.init.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.init.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.init.annotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if or .Values.image.credentials (and .Values.tls.enabled .Values.tls.init.image.credentials (not .Values.tls.certs.provided)) }} + imagePullSecrets: + {{- if .Values.image.credentials }} + - name: {{ template "cockroachdb.fullname" . }}.db.registry + {{- end }} + {{- if and .Values.tls.enabled .Values.tls.init.image.credentials (not .Values.tls.certs.provided) }} + - name: {{ template "cockroachdb.fullname" . }}.init-certs.registry + {{- end }} + {{- end }} + {{- if and .Values.tls.enabled (not .Values.tls.certs.provided)}} + serviceAccountName: {{ template "cockroachdb.tls.serviceAccount.name" . }} + initContainers: + # The init-certs container sends a CSR (certificate signing request) to + # the Kubernetes cluster. + # You can see pending requests using: + # kubectl get csr + # CSRs can be approved using: + # kubectl certificate approve + # + # In addition to the Node certificate and key, the init-certs entrypoint + # will symlink the cluster CA to the certs directory. 
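+ # As a purely illustrative example (the CSR name below is an assumption based
+ # on the <namespace>.client.<user> naming used by the request-cert tool; check
+ # the actual name with `kubectl get csr` first), approving the client
+ # certificate for the `default` namespace might look like:
+ #   kubectl certificate approve default.client.root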
+ - name: init-certs + image: "{{ .Values.tls.init.image.repository }}:{{ .Values.tls.init.image.tag }}" + imagePullPolicy: {{ .Values.tls.init.image.pullPolicy | quote }} + command: + - /bin/ash + - -ecx + - >- + /request-cert + -namespace=${POD_NAMESPACE} + -certs-dir=/cockroach-certs/ + -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + -type=client + -user=root + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs/ + {{- end }} + {{- with .Values.init.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.init.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.init.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: cluster-init + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + # Run the command in an `while true` loop because this Job is bound + # to come up before the CockroachDB Pods (due to the time needed to + # get PersistentVolumes attached to Nodes), and sleeping 5 seconds + # between attempts is much better than letting the Pod fail when + # the init command does and waiting out Kubernetes' non-configurable + # exponential back-off for Pod restarts. + # Command completes either when cluster initialization succeeds, + # or when cluster has been initialized already. + command: + - /bin/bash + - -c + - >- + while true; do + initOUT=$(set -x; + /cockroach/cockroach init + {{- if .Values.tls.enabled }} + --certs-dir=/cockroach-certs/ + {{- else }} + --insecure + {{- end }} + {{- with index .Values.conf "cluster-name" }} + --cluster-name={{.}} + {{- end }} + --host={{ template "cockroachdb.fullname" . }}-0.{{ template "cockroachdb.fullname" . -}} + :{{ .Values.service.ports.grpc.internal.port | int64 }} + 2>&1); + initRC="$?"; + echo $initOUT; + [[ "$initRC" == "0" ]] && exit 0; + [[ "$initOUT" == *"cluster has already been initialized"* ]] && exit 0; + sleep 5; + done + {{- if .Values.tls.enabled }} + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs/ + {{- end }} + {{- with .Values.init.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.tls.enabled }} + volumes: + - name: client-certs + {{- if .Values.tls.certs.provided }} + {{- if .Values.tls.certs.tlsSecret }} + projected: + sources: + - secret: + name: {{ .Values.tls.certs.clientRootSecret }} + items: + - key: ca.crt + path: ca.crt + mode: 0400 + - key: tls.crt + path: client.root.crt + mode: 0400 + - key: tls.key + path: client.root.key + mode: 0400 + {{- else }} + secret: + secretName: {{ .Values.tls.certs.clientRootSecret }} + defaultMode: 0400 + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/networkpolicy.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/networkpolicy.yaml new file mode 100644 index 000000000..1739c45e5 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/networkpolicy.yaml @@ -0,0 +1,59 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "cockroachdb.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "cockroachdb.tls.serviceAccount.name" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . 
}} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 6 }} + {{- end }} + ingress: + - ports: + - port: grpc + {{- with .Values.networkPolicy.ingress.grpc }} + from: + # Allow connections via custom rules. + {{- toYaml . | nindent 8 }} + # Allow client connection via pre-considered label. + - podSelector: + matchLabels: + {{ template "cockroachdb.fullname" . }}-client: "true" + # Allow other CockroachDBs to connect to form a cluster. + - podSelector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- if gt (.Values.statefulset.replicas | int64) 1 }} + # Allow init Job to connect to bootstrap a cluster. + - podSelector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.init.labels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + # Allow connections to admin UI and for Prometheus. + - ports: + - port: http + {{- with .Values.networkPolicy.ingress.http }} + from: {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/poddisruptionbudget.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..e49431f89 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +kind: PodDisruptionBudget +apiVersion: policy/v1beta1 +metadata: + name: {{ template "cockroachdb.fullname" . }}-budget + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 6 }} + {{- end }} + maxUnavailable: {{ .Values.statefulset.budget.maxUnavailable | int64 }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/role.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/role.yaml new file mode 100644 index 000000000..62a044ddc --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.tls.enabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["secrets"] + {{- if .Values.tls.certs.provided }} + verbs: ["get"] + {{- else }} + verbs: ["create", "get"] + {{- end }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/rolebinding.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/rolebinding.yaml new file mode 100644 index 000000000..c65441b42 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if .Values.tls.enabled }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cockroachdb.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cockroachdb.tls.serviceAccount.name" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/secret.registry.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/secret.registry.yaml new file mode 100644 index 000000000..3dc34f386 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/secret.registry.yaml @@ -0,0 +1,23 @@ +{{- range $name, $cred := dict "db" (.Values.image.credentials) "init-certs" (.Values.tls.init.image.credentials) }} +{{- if not (empty $cred) }} +{{- if or (and (eq $name "init-certs") $.Values.tls.enabled) (ne $name "init-certs") }} +--- +kind: Secret +apiVersion: v1 +metadata: + name: {{ template "cockroachdb.fullname" $ }}.{{ $name }}.registry + namespace: {{ $.Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" $ }} + app.kubernetes.io/name: {{ template "cockroachdb.name" $ }} + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + app.kubernetes.io/managed-by: {{ $.Release.Service | quote }} + {{- with $.Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ printf `{"auths":{%s:{"auth":"%s"}}}` ($cred.registry | quote) (printf "%s:%s" $cred.username $cred.password | b64enc) | b64enc | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/service.discovery.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/service.discovery.yaml new file mode 100644 index 000000000..3b7f5d0e7 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/service.discovery.yaml @@ -0,0 +1,62 @@ +# This service only exists to create DNS entries for each pod in +# the StatefulSet such that they can resolve each other's IP addresses. +# It does not create a load-balanced ClusterIP and should not be used directly +# by clients in most circumstances. +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.service.discovery.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + # Use this annotation in addition to the actual field below because the + # annotation will stop being respected soon, but the field is broken in + # some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running + # in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: _status/vars + prometheus.io/port: {{ .Values.service.ports.http.port | quote }} + {{- with .Values.service.discovery.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + # We want all Pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB Pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + {{- $ports := .Values.service.ports }} + # The main port, served by gRPC, serves Postgres-flavor SQL, inter-node + # traffic and the CLI. + - name: {{ $ports.grpc.external.name | quote }} + port: {{ $ports.grpc.external.port | int64 }} + targetPort: grpc + {{- if ne ($ports.grpc.internal.port | int64) ($ports.grpc.external.port | int64) }} + - name: {{ $ports.grpc.internal.name | quote }} + port: {{ $ports.grpc.internal.port | int64 }} + targetPort: grpc + {{- end }} + # The secondary port serves the UI as well as health and debug endpoints. + - name: {{ $ports.http.name | quote }} + port: {{ $ports.http.port | int64 }} + targetPort: http + selector: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/service.public.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/service.public.yaml new file mode 100644 index 000000000..529b89d83 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/service.public.yaml @@ -0,0 +1,46 @@ +# This Service is meant to be used by clients of the database. +# It exposes a ClusterIP that will automatically load balance connections +# to the different database Pods. +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cockroachdb.fullname" . }}-public + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.service.public.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- with .Values.service.public.annotations }} + annotations: {{- toYaml . | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.service.public.type | quote }} + ports: + {{- $ports := .Values.service.ports }} + # The main port, served by gRPC, serves Postgres-flavor SQL, inter-node + # traffic and the CLI. 
+ - name: {{ $ports.grpc.external.name | quote }} + port: {{ $ports.grpc.external.port | int64 }} + targetPort: grpc + {{- if ne ($ports.grpc.internal.port | int64) ($ports.grpc.external.port | int64) }} + - name: {{ $ports.grpc.internal.name | quote }} + port: {{ $ports.grpc.internal.port | int64 }} + targetPort: grpc + {{- end }} + # The secondary port serves the UI as well as health and debug endpoints. + - name: {{ $ports.http.name | quote }} + port: {{ $ports.http.port | int64 }} + targetPort: http + selector: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/serviceaccount.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/serviceaccount.yaml new file mode 100644 index 000000000..45c3fe09c --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.tls.enabled .Values.tls.serviceAccount.create }} +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ template "cockroachdb.tls.serviceAccount.name" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/statefulset.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/statefulset.yaml new file mode 100644 index 000000000..adb116788 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/statefulset.yaml @@ -0,0 +1,334 @@ +kind: StatefulSet +apiVersion: {{ template "cockroachdb.statefulset.apiVersion" . }} +metadata: + name: {{ template "cockroachdb.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + helm.sh/chart: {{ template "cockroachdb.chart" . }} + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "cockroachdb.fullname" . }} + replicas: {{ .Values.statefulset.replicas | int64 }} + updateStrategy: {{- toYaml .Values.statefulset.updateStrategy | nindent 4 }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy | quote }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 6 }} + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.statefulset.annotations }} + annotations: {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if or .Values.image.credentials (and .Values.tls.enabled .Values.tls.init.image.credentials (not .Values.tls.certs.provided)) }} + imagePullSecrets: + {{- if .Values.image.credentials }} + - name: {{ template "cockroachdb.fullname" . }}.db.registry + {{- end }} + {{- if and .Values.tls.enabled .Values.tls.init.image.credentials (not .Values.tls.certs.provided) }} + - name: {{ template "cockroachdb.fullname" . }}.init-certs.registry + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + serviceAccountName: {{ template "cockroachdb.tls.serviceAccount.name" . }} + {{- if not .Values.tls.certs.provided }} + initContainers: + # The init-certs container sends a CSR (certificate signing request) to + # the Kubernetes cluster. + # You can see pending requests using: + # kubectl get csr + # CSRs can be approved using: + # kubectl certificate approve + # + # All addresses used to contact a Node must be specified in the + # `--addresses` arg. + # + # In addition to the Node certificate and key, the init-certs entrypoint + # will symlink the cluster CA to the certs directory. + - name: init-certs + image: "{{ .Values.tls.init.image.repository }}:{{ .Values.tls.init.image.tag }}" + imagePullPolicy: {{ .Values.tls.init.image.pullPolicy | quote }} + command: + - /bin/ash + - -ecx + - >- + /request-cert + -namespace=${POD_NAMESPACE} + -certs-dir=/cockroach-certs/ + -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + -type=node + -addresses=localhost,127.0.0.1,$(hostname -f),$(hostname -f|cut -f 1-2 -d '.'),{{ template "cockroachdb.fullname" . }}-public,{{ template "cockroachdb.fullname" . }}-public.$(hostname -f|cut -f 3- -d '.') + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: certs + mountPath: /cockroach-certs/ + {{- end }} + {{- end }} + {{- if or .Values.statefulset.nodeAffinity .Values.statefulset.podAffinity .Values.statefulset.podAntiAffinity }} + affinity: + {{- with .Values.statefulset.nodeAffinity }} + nodeAffinity: {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.statefulset.podAffinity }} + podAffinity: {{- toYaml . | nindent 10 }} + {{- end }} + {{- if .Values.statefulset.podAntiAffinity }} + podAntiAffinity: + {{- if .Values.statefulset.podAntiAffinity.type }} + {{- if eq .Values.statefulset.podAntiAffinity.type "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 18 }} + {{- end }} + {{- else if eq .Values.statefulset.podAntiAffinity.type "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: {{ .Values.statefulset.podAntiAffinity.weight | int64 }} + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.statefulset.labels }} + {{- toYaml . | nindent 20 }} + {{- end }} + {{- end }} + {{- else }} + {{- toYaml .Values.statefulset.podAntiAffinity | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.statefulset.nodeSelector }} + nodeSelector: {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.statefulset.priorityClassName }} + priorityClassName: {{ .Values.statefulset.priorityClassName }} + {{- end }} + {{- with .Values.statefulset.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. + terminationGracePeriodSeconds: 60 + containers: + - name: db + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + args: + - shell + - -ecx + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + # + # `--join` CLI flag is hardcoded to exactly 3 Pods, because: + # 1. Having `--join` value depending on `statefulset.replicas` + # will trigger undesired restart of existing Pods when + # StatefulSet is scaled up/down. We want to scale without + # restarting existing Pods. + # 2. At least one Pod in `--join` is enough to successfully + # join CockroachDB cluster and gossip with all other existing + # Pods, even if there are 3 or more Pods. + # 3. It's harmless for `--join` to have 3 Pods even for 1-Pod + # clusters, while it gives us opportunity to scale up even if + # some Pods of existing cluster are down (for whatever reason). + # See details explained here: + # https://github.com/helm/charts/pull/18993#issuecomment-558795102 + - >- + exec /cockroach/cockroach + {{- if index .Values.conf `single-node` }} + start-single-node + {{- else }} + start --join= + {{- if .Values.conf.join }} + {{- join `,` .Values.conf.join -}} + {{- else }} + {{- range $i, $_ := until 3 -}} + {{- if gt $i 0 -}},{{- end -}} + ${STATEFULSET_NAME}-{{ $i }}.${STATEFULSET_FQDN}:{{ $.Values.service.ports.grpc.internal.port | int64 -}} + {{- end -}} + {{- end }} + --advertise-host=$(hostname).${STATEFULSET_FQDN} + {{- with index .Values.conf `cluster-name` }} + --cluster-name={{ . }} + {{- if index $.Values.conf `disable-cluster-name-verification` }} + --disable-cluster-name-verification + {{- end }} + {{- end }} + {{- end }} + --logtostderr={{ .Values.conf.logtostderr }} + {{- if .Values.tls.enabled }} + --certs-dir=/cockroach/cockroach-certs/ + {{- else }} + --insecure + {{- end }} + {{- with .Values.conf.attrs }} + --attrs={{ join `:` . }} + {{- end }} + --http-port={{ index .Values.conf `http-port` | int64 }} + --port={{ .Values.conf.port | int64 }} + --cache={{ .Values.conf.cache }} + --max-disk-temp-storage={{ index .Values.conf `max-disk-temp-storage` }} + --max-offset={{ index .Values.conf `max-offset` }} + --max-sql-memory={{ index .Values.conf `max-sql-memory` }} + {{- with .Values.conf.locality }} + --locality={{ . }} + {{- end }} + {{- with index .Values.conf `sql-audit-dir` }} + --sql-audit-dir={{ . }} + {{- end }} + {{- range .Values.statefulset.args }} + {{ . }} + {{- end }} + env: + - name: STATEFULSET_NAME + value: {{ template "cockroachdb.fullname" . }} + - name: STATEFULSET_FQDN + value: {{ template "cockroachdb.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: COCKROACH_CHANNEL + value: kubernetes-helm + {{- with .Values.statefulset.env }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + ports: + - name: grpc + containerPort: {{ .Values.conf.port | int64 }} + protocol: TCP + - name: http + containerPort: {{ index .Values.conf `http-port` | int64 }} + protocol: TCP + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data/ + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /cockroach/cockroach-certs/ + {{- end }} + {{- range .Values.statefulset.secretMounts }} + - name: {{ printf "secret-%s" . | quote }} + mountPath: {{ printf "/etc/cockroach/secrets/%s" . | quote }} + readOnly: true + {{- end }} + livenessProbe: + httpGet: + path: /health + port: http + {{- if .Values.tls.enabled }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: 30 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /health?ready=1 + port: http + {{- if .Values.tls.enabled }} + scheme: HTTPS + {{- end }} + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + {{- with .Values.statefulset.resources }} + resources: {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: datadir + {{- if .Values.storage.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: datadir + {{- else if .Values.storage.hostPath }} + hostPath: + path: {{ .Values.storage.hostPath | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + {{- if .Values.tls.certs.provided }} + {{- if .Values.tls.certs.tlsSecret }} + projected: + sources: + - secret: + name: {{ .Values.tls.certs.nodeSecret }} + items: + - key: ca.crt + path: ca.crt + mode: 0400 + - key: tls.crt + path: node.crt + mode: 0400 + - key: tls.key + path: node.key + mode: 0400 + {{- else }} + secret: + secretName: {{ .Values.tls.certs.nodeSecret }} + defaultMode: 0400 + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.statefulset.secretMounts }} + - name: {{ printf "secret-%s" . | quote }} + secret: + secretName: {{ . | quote }} + {{- end }} +{{- if .Values.storage.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: datadir + labels: + app.kubernetes.io/name: {{ template "cockroachdb.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- with .Values.storage.persistentVolume.labels }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.labels }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.storage.persistentVolume.annotations }} + annotations: {{- toYaml . | nindent 10 }} + {{- end }} + spec: + accessModes: ["ReadWriteOnce"] + {{- if .Values.storage.persistentVolume.storageClass }} + {{- if (eq "-" .Values.storage.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.storage.persistentVolume.storageClass | quote}} + {{- end }} + {{- end }} + resources: + requests: + storage: {{ .Values.storage.persistentVolume.size | quote }} +{{- end }} diff --git a/charts/cockroachdb/cockroachdb/4.1.200/templates/tests/client.yaml b/charts/cockroachdb/cockroachdb/4.1.200/templates/tests/client.yaml new file mode 100644 index 000000000..8c8f96be7 --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/templates/tests/client.yaml @@ -0,0 +1,65 @@ +kind: Pod +apiVersion: v1 +metadata: + name: {{ template "cockroachdb.fullname" . }}-test + namespace: {{ .Release.Namespace | quote }} +{{- if .Values.networkPolicy.enabled }} + labels: + {{ template "cockroachdb.fullname" . 
}}-client: "true" +{{- end }} + annotations: + helm.sh/hook: test-success +spec: + restartPolicy: Never +{{- if .Values.image.credentials }} + imagePullSecrets: + - name: {{ template "cockroachdb.fullname" . }}.db.registry +{{- end }} + {{- if .Values.tls.certs.provided }} + volumes: + - name: client-certs + {{- if .Values.tls.certs.tlsSecret }} + projected: + sources: + - secret: + name: {{ .Values.tls.certs.clientRootSecret }} + items: + - key: ca.crt + path: ca.crt + mode: 0400 + - key: tls.crt + path: client.root.crt + mode: 0400 + - key: tls.key + path: client.root.key + mode: 0400 + {{- else }} + secret: + secretName: {{ .Values.tls.certs.clientRootSecret }} + defaultMode: 0400 + {{- end }} + {{- end }} + containers: + - name: client-test + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.tls.certs.provided }} + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + {{- end }} + command: + - /cockroach/cockroach + - sql + {{- if .Values.tls.certs.provided }} + - --certs-dir + - /cockroach-certs + {{- else }} + - --insecure + {{- end}} + - --host + - {{ template "cockroachdb.fullname" . }}-public.{{ .Release.Namespace }} + - --port + - {{ .Values.service.ports.grpc.external.port | quote }} + - -e + - SHOW DATABASES; diff --git a/charts/cockroachdb/cockroachdb/4.1.200/values.yaml b/charts/cockroachdb/cockroachdb/4.1.200/values.yaml new file mode 100644 index 000000000..0ed532d7e --- /dev/null +++ b/charts/cockroachdb/cockroachdb/4.1.200/values.yaml @@ -0,0 +1,382 @@ +image: + repository: cockroachdb/cockroach + tag: v20.1.3 + pullPolicy: IfNotPresent + credentials: {} + # registry: docker.io + # username: john_doe + # password: changeme + + +# Additional labels to apply to all Kubernetes resources created by this chart. +labels: {} + # app.kubernetes.io/part-of: my-app + + +# Cluster's default DNS domain. +# You should overwrite it if you're using a different one, +# otherwise CockroachDB nodes discovery won't work. +clusterDomain: cluster.local + + +conf: + # An ordered list of CockroachDB node attributes. + # Attributes are arbitrary strings specifying machine capabilities. + # Machine capabilities might include specialized hardware or number of cores + # (e.g. "gpu", "x16c"). + attrs: [] + # - x16c + # - gpu + + # Total size in bytes for caches, shared evenly if there are multiple + # storage devices. Size suffixes are supported (e.g. `1GB` and `1GiB`). + # A percentage of physical memory can also be specified (e.g. `.25`). + cache: 25% + + # Sets a name to verify the identity of a cluster. + # The value must match between all nodes specified via `conf.join`. + # This can be used as an additional verification when either the node or + # cluster, or both, have not yet been initialized and do not yet know their + # cluster ID. + # To introduce a cluster name into an already-initialized cluster, pair this + # option with `conf.disable-cluster-name-verification: yes`. + cluster-name: "" + + # Tell the server to ignore `conf.cluster-name` mismatches. + # This is meant for use when opting an existing cluster into starting to use + # cluster name verification, or when changing the cluster name. + # The cluster should be restarted once with `conf.cluster-name` and + # `conf.disable-cluster-name-verification: yes` combined, and once all nodes + # have been updated to know the new cluster name, the cluster can be restarted + # again with `conf.disable-cluster-name-verification: no`. 
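+ # For illustration only (the cluster name below is a hypothetical example),
+ # the first restart of such a rollout would pair the two options like this:
+ #   cluster-name: "acme-prod"
+ #   disable-cluster-name-verification: yes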
+ # This option has no effect if `conf.cluster-name` is not specified. + disable-cluster-name-verification: false + + # The addresses for connecting a CockroachDB nodes to an existing cluster. + # If you are deploying a second CockroachDB instance that should join a first + # one, use the below list to join to the existing instance. + # Each item in the array should be a FQDN (and port if needed) resolvable by + # new Pods. + join: [] + + # Logs at or above this threshold to STDERR. + logtostderr: INFO + + # Maximum storage capacity available to store temporary disk-based data for + # SQL queries that exceed the memory budget (e.g. join, sorts, etc are + # sometimes able to spill intermediate results to disk). + # Accepts numbers interpreted as bytes, size suffixes (e.g. `32GB` and + # `32GiB`) or a percentage of disk size (e.g. `10%`). + # The location of the temporary files is within the first store dir. + # If expressed as a percentage, `max-disk-temp-storage` is interpreted + # relative to the size of the storage device on which the first store is + # placed. The temp space usage is never counted towards any store usage + # (although it does share the device with the first store) so, when + # configuring this, make sure that the size of this temp storage plus the size + # of the first store don't exceed the capacity of the storage device. + # If the first store is an in-memory one (i.e. `type=mem`), then this + # temporary "disk" data is also kept in-memory. + # A percentage value is interpreted as a percentage of the available internal + # memory. + max-disk-temp-storage: 0 + + # Maximum allowed clock offset for the cluster. If observed clock offsets + # exceed this limit, servers will crash to minimize the likelihood of + # reading inconsistent data. Increasing this value will increase the time + # to recovery of failures as well as the frequency of uncertainty-based + # read restarts. + # Note, that this value must be the same on all nodes in the cluster. + # In order to change it, all nodes in the cluster must be stopped + # simultaneously and restarted with the new value. + max-offset: 500ms + + # Maximum memory capacity available to store temporary data for SQL clients, + # including prepared queries and intermediate data rows during query + # execution. Accepts numbers interpreted as bytes, size suffixes + # (e.g. `1GB` and `1GiB`) or a percentage of physical memory (e.g. `.25`). + max-sql-memory: 25% + + # An ordered, comma-separated list of key-value pairs that describe the + # topography of the machine. Topography might include country, datacenter + # or rack designations. Data is automatically replicated to maximize + # diversities of each tier. The order of tiers is used to determine + # the priority of the diversity, so the more inclusive localities like + # country should come before less inclusive localities like datacenter. + # The tiers and order must be the same on all nodes. Including more tiers + # is better than including fewer. For example: + # locality: country=us,region=us-west,datacenter=us-west-1b,rack=12 + # locality: country=ca,region=ca-east,datacenter=ca-east-2,rack=4 + # locality: planet=earth,province=manitoba,colo=secondary,power=3 + locality: "" + + # Run CockroachDB instances in standalone mode with replication disabled + # (replication factor = 1). 
+ # Enabling this option makes the following values to be ignored: + # - `conf.cluster-name` + # - `conf.disable-cluster-name-verification` + # - `conf.join` + # + # WARNING: Enabling this option makes each deployed Pod as a STANDALONE + # CockroachDB instance, so the StatefulSet does NOT FORM A CLUSTER. + # Don't use this option for production deployments unless you clearly + # understand what you're doing. + # Usually, this option is intended to be used in conjunction with + # `statefulset.replicas: 1` for temporary one-time deployments (like + # running E2E tests, for example). + single-node: false + + # If non-empty, create a SQL audit log in the specified directory. + sql-audit-dir: "" + + # CockroachDB's port to listen to inter-communications and client connections. + port: 26257 + + # CockroachDB's port to listen to HTTP requests. + http-port: 8080 + + +statefulset: + replicas: 3 + updateStrategy: + type: RollingUpdate + podManagementPolicy: Parallel + budget: + maxUnavailable: 1 + + # List of additional command-line arguments you want to pass to the + # `cockroach start` command. + args: [] + # - --disable-cluster-name-verification + + # List of extra environment variables to pass into container + env: [] + # - name: COCKROACH_ENGINE_MAX_SYNC_DURATION + # value: "24h" + + # List of Secrets names in the same Namespace as the CockroachDB cluster, + # which shall be mounted into `/etc/cockroach/secrets/` for every cluster + # member. + secretMounts: [] + + # Additional labels to apply to this StatefulSet and all its Pods. + labels: + app.kubernetes.io/component: cockroachdb + + # Additional annotations to apply to the Pods of this StatefulSet. + annotations: {} + + # Affinity rules for scheduling Pods of this StatefulSet on Nodes. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + nodeAffinity: {} + # Inter-Pod Affinity rules for scheduling Pods of this StatefulSet. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity + podAffinity: {} + # Anti-affinity rules for scheduling Pods of this StatefulSet. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity + # You may either toggle options below for default anti-affinity rules, + # or specify the whole set of anti-affinity rules instead of them. + podAntiAffinity: + # Type of anti-affinity rules: either `soft`, `hard` or empty value (which + # disables anti-affinity rules). + type: soft + # Weight for `soft` anti-affinity rules. + # Does not apply for other anti-affinity types. + weight: 100 + + # Node selection constraints for scheduling Pods of this StatefulSet. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + nodeSelector: {} + + # PriorityClassName given to Pods of this StatefulSet + # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + priorityClassName: "" + + # Taints to be tolerated by Pods of this StatefulSet. + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + + # Uncomment the following resources definitions or pass them from + # command line to control the CPU and memory resources allocated + # by Pods of this StatefulSet. + resources: {} + # limits: + # cpu: 100m + # memory: 512Mi + # requests: + # cpu: 100m + # memory: 512Mi + + +service: + ports: + # You can set a different external and internal gRPC ports and their name. 
+ grpc: + external: + port: 26257 + name: grpc + # If the port number is different than `external.port`, then it will be + # named as `internal.name` in Service. + internal: + port: 26257 + # If using Istio set it to `cockroach`. + name: grpc-internal + http: + port: 8080 + name: http + + # This Service is meant to be used by clients of the database. + # It exposes a ClusterIP that will automatically load balance connections + # to the different database Pods. + public: + type: ClusterIP + # Additional labels to apply to this Service. + labels: + app.kubernetes.io/component: cockroachdb + # Additional annotations to apply to this Service. + annotations: {} + + # This service only exists to create DNS entries for each pod in + # the StatefulSet such that they can resolve each other's IP addresses. + # It does not create a load-balanced ClusterIP and should not be used directly + # by clients in most circumstances. + discovery: + # Additional labels to apply to this Service. + labels: + app.kubernetes.io/component: cockroachdb + # Additional annotations to apply to this Service. + annotations: {} + +# CockroachDB's ingress for web ui. +ingress: + enabled: false + labels: {} + annotations: {} + # kubernetes.io/ingress.class: nginx + # cert-manager.io/cluster-issuer: letsencrypt + paths: [/] + hosts: [] + # - cockroachlabs.com + tls: [] + # - hosts: [cockroachlabs.com] + # secretName: cockroachlabs-tls + +# CockroachDB's data persistence. +# If neither `persistentVolume` nor `hostPath` is used, then data will be +# persisted in ad-hoc `emptyDir`. +storage: + # Absolute path on host to store CockroachDB's data. + # If not specified, then `emptyDir` will be used instead. + # If specified, but `persistentVolume.enabled` is `true`, then has no effect. + hostPath: "" + + # If `enabled` is `true` then a PersistentVolumeClaim will be created and + # used to store CockroachDB's data, otherwise `hostPath` is used. + persistentVolume: + enabled: true + + size: 100Gi + + # If defined, then `storageClassName: `. + # If set to "-", then `storageClassName: ""`, which disables dynamic + # provisioning. + # If undefined or empty (default), then no `storageClassName` spec is set, + # so the default provisioner will be chosen (gp2 on AWS, standard on + # GKE, AWS & OpenStack). + storageClass: "" + + # Additional labels to apply to the created PersistentVolumeClaims. + labels: {} + # Additional annotations to apply to the created PersistentVolumeClaims. + annotations: {} + + +# Kubernetes Job which initializes multi-node CockroachDB cluster. +# It's not created if `statefulset.replicas` is `1`. +init: + # Additional labels to apply to this Job and its Pod. + labels: + app.kubernetes.io/component: init + + # Additional annotations to apply to the Pod of this Job. + annotations: {} + + # Affinity rules for scheduling the Pod of this Job. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + + # Node selection constraints for scheduling the Pod of this Job. + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + nodeSelector: {} + + # Taints to be tolerated by the Pod of this Job. + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # The init Pod runs at cluster creation to initialize CockroachDB. It finishes + # quickly and doesn't continue to consume resources in the Kubernetes + # cluster. 
Normally, you should leave this section commented out, but if your + # Kubernetes cluster uses Resource Quotas and requires all pods to specify + # resource requests or limits, you can set those here. + resources: {} + # requests: + # cpu: "10m" + # memory: "128Mi" + # limits: + # cpu: "10m" + # memory: "128Mi" + + +# Whether to run securely using TLS certificates. +tls: + enabled: false + serviceAccount: + # Specifies whether this ServiceAccount should be created. + create: true + # The name of this ServiceAccount to use. + # If not set and `create` is `true`, then a name is auto-generated. + name: "" + certs: + # Bring your own certs scenario. If provided, tls.init section will be ignored. + provided: false + # Secret name for the client root cert. + clientRootSecret: cockroachdb-root + # Secret name for node cert. + nodeSecret: cockroachdb-node + # Enable if the secret is a dedicated TLS. + # TLS secrets are created by cert-mananger, for example. + tlsSecret: false + + init: + # Image to use for requesting TLS certificates. + image: + repository: cockroachdb/cockroach-k8s-request-cert + tag: "0.4" + pullPolicy: IfNotPresent + credentials: {} + # registry: docker.io + # username: john_doe + # password: changeme + + +networkPolicy: + enabled: false + + ingress: + # List of sources which should be able to access the CockroachDB Pods via + # gRPC port. Items in this list are combined using a logical OR operation. + # Rules for allowing inter-communication are applied automatically. + # If empty, then connections from any Pod is allowed. + grpc: [] + # - podSelector: + # matchLabels: + # app.kubernetes.io/name: my-app-django + # app.kubernetes.io/instance: my-app + + # List of sources which should be able to access the CockroachDB Pods via + # HTTP port. Items in this list are combined using a logical OR operation. + # If empty, then connections from any Pod is allowed. + http: [] + # - namespaceSelector: + # matchLabels: + # project: my-project diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/.helmignore b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/Chart.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/Chart.yaml new file mode 100644 index 000000000..2831183b3 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: csi-wekafsplugin +apiVersion: v2 +appVersion: 0.6.4 +description: Helm chart for Deployment of WekaIO Container Storage Interface (CSI) plugin for WekaFS - the world fastest filesystem +home: https://github.com/weka/csi-wekafs +icon: https://weka.github.io/csi-wekafs/logo.png +name: csi-wekafsplugin +sources: +- https://github.com/weka/csi-wekafs/tree/v0.6.4/deploy/helm/csi-wekafsplugin +type: application +version: 0.6.400 diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/LOCAL.md b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/LOCAL.md new file mode 100644 index 000000000..de09f09f3 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/LOCAL.md @@ -0,0 +1,28 @@ +# Overview +Helm Chart for Weka wekafs CSI driver deployment + +# Usage +## Install driver + + - Optionally modify values.yaml + - Create a name space for CSI driver deployment by issuing + ``` + kubectl create namespace csi-wekafsplugin + ``` + - Install the driver: + ``` + helm install csi-wekafsplugin --namespace csi-wekafsplugin -n=csi-wekafsplugin ./ + ``` + +## Uninstall driver +To uninstall a driver, issue the following command +``` +helm uninstall csi-wekafsplugin --namespace csi-wekafsplugin -n=csi-wekafsplugin +``` + +# Upgrade +To upgrade from versions before v0.6.0, first uninstall the previous version using cleanup script: +``` +./deploy/kubernetes-latest/cleanup.sh +``` +Then install as usual. \ No newline at end of file diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/README.md b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/README.md new file mode 100644 index 000000000..d24f05dd7 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/README.md @@ -0,0 +1,23 @@ +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/csi-wekafs)](https://artifacthub.io/packages/search?repo=csi-wekafs) +# CSI WekaFS Driver + +This repository hosts the CSI WekaFS driver and all of its build and dependent configuration files to deploy the driver. + +## Pre-requisite +- Kubernetes cluster of version 1.18 and up, 1.19 and up recommended, 1.13 and up should work but were not tested. 
+- Helm v3 must be installed and configured properly +- Weka system pre-configured and Weka client installed and registered in cluster for each Kubernetes node + +## Deployment +```shell +helm repo add csi-wekafs https://weka.github.io/csi-wekafs +helm install csi-wekafsplugin csi-wekafs/csi-wekafsplugin --namespace csi-wekafsplugin --create-namespace +``` + +## Usage +- [Deploy an Example application](https://github.com/weka/csi-wekafs/blob/master/docs/usage.md) + +## Additional Documentation +- [Official Weka CSI Plugin documentation](https://docs.weka.io/appendix/weka-csi-plugin) + diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/app-readme.md b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/app-readme.md new file mode 100644 index 000000000..880868b3e --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/app-readme.md @@ -0,0 +1,20 @@ +# WekaIO CSI Plugin +[Weka's](https://weka.io) parallel file system delivers the highest performance for the most data-intensive workloads, +powering solutions to problems the world has never seen before. + +This repository hosts the CSI WekaFS driver and all of its build and dependent configuration files to deploy the driver. + +## Pre-requisite +- Kubernetes cluster +- Helm v3 +- Running version 1.18 or later. Although older versions from 1.13 and up should work, they were not tested +- Weka system pre-configured and Weka client installed and registered in cluster for each Kubernetes node +## Deployment +```shell +helm repo add csi-wekafs https://weka.github.io/csi-wekafs +helm install csi-wekafsplugin csi-wekafs/csi-wekafsplugin --namespace csi-wekafsplugin --create-namespace +``` +## Usage +- [Deploy an Example application](https://github.com/weka/csi-wekafs/blob/master/docs/usage.md) +## Additional Documentation +- [Official Weka CSI Plugin documentation](https://docs.weka.io/appendix/weka-csi-plugin) diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/questions.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/questions.yaml new file mode 100644 index 000000000..fd0d25dd8 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/questions.yaml @@ -0,0 +1,6 @@ +questions: + - variable: dynamicProvisionPath + default: "csi-volumes" + required: false + label: Path for creation of dynamic volumes (relative to filesystem root) + group: "Global Settings" diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/NOTES.txt b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/NOTES.txt new file mode 100644 index 000000000..426acbc08 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/NOTES.txt @@ -0,0 +1,10 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
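+
+As a quick, illustrative check (the label below is taken from this chart's
+controller StatefulSet template and is an assumption if you have customised
+the templates), you can confirm that the controller pod is running with:
+
+  $ kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ .Release.Name }}-controller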
+ +To learn more about the release, try: + + $ helm status {{ .Release.Name }} + $ helm get all {{ .Release.Name }} + +Official Weka CSI Plugin documentation can be found here: https://docs.weka.io/appendix/weka-csi-plugin diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/controllerserver.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/controllerserver.yaml new file mode 100644 index 000000000..48f385f99 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/controllerserver.yaml @@ -0,0 +1,242 @@ +apiVersion: v1 +kind: ServiceAccount +imagePullSecrets: +- name: {{ .Release.Name }}-creds +metadata: + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace }} + +--- +# cluster role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-controller +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "create", "get", "list", "watch", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + +--- +# cluster role binding +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-controller +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-controller + apiGroup: rbac.authorization.k8s.io + +--- +# stateful set of controller +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name }}-controller + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: {{ .Release.Name }}-controller + serviceName: {{ .Release.Name }}-controller + replicas: 1 + template: + 
metadata: + labels: + app: {{ .Release.Name }}-controller + spec: + serviceAccountName: {{ .Release.Name }}-controller + containers: + - name: csi-attacher + image: {{ required "csi attacher sidercar image." .Values.images.attachersidecar }} + securityContext: + privileged: true + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-provisioner + image: {{ required "csi provisioner sidecar container image." .Values.images.provisionersidecar }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--feature-gates=Topology=true" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: "/csi" + - name: csi-resizer + image: {{ required "csi attacher sidercar image." .Values.images.resizersidecar }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + + - name: wekafs + securityContext: + privileged: true + image: {{ .Values.images.csidriver }}:v{{ .Values.images.csidriverTag }} + imagePullPolicy: Always + args: + - "--drivername=$(CSI_DRIVER_NAME)" + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--dynamic-path=$(CSI_DYNAMIC_PATH)" + - "--csimode=$(X_CSI_MODE)" + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CSI_DRIVER_NAME + value: {{ required "Provide CSI Driver Name" .Values.csiDriverName }} + - name: CSI_DRIVER_VERSION + value: {{ required "Provide CSI Driver version" .Values.csiDriverVersion }} + - name: X_CSI_MODE + value: controller + - name: CSI_DYNAMIC_PATH + value: {{ required "Provide CSI Driver Dynamic Volume Creation Path" .Values.dynamicProvisionPath }} + - name: X_CSI_DEBUG + value: "false" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /var/lib/csi-wekafs-data + name: csi-data-dir + - mountPath: /dev + name: dev-dir + + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: {{ required "Provide Liveness Probe image." .Values.images.livenessprobesidecar }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--health-port=$(HEALTH_PORT)" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + - name: HEALTH_PORT + value: "9898" + {{- with .Values.controllerPluginTolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-wekafs + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-wekafs-data/ + type: DirectoryOrCreate + name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/driver.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/driver.yaml new file mode 100644 index 000000000..83e1d8689 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/driver.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: {{ required "Provide CSI Driver Name" .Values.csiDriverName }} +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: + - Persistent diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/nodeserver.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/nodeserver.yaml new file mode 100644 index 000000000..76ce743d5 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/templates/nodeserver.yaml @@ -0,0 +1,182 @@ +apiVersion: v1 +kind: ServiceAccount +imagePullSecrets: +- name: {{ .Release.Name }}-creds +metadata: + name: {{ .Release.Name }}-node + namespace: {{ .Release.Namespace }} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-node +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["create", "delete", "get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumesclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-node +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-node + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }}-node + apiGroup: rbac.authorization.k8s.io +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name }}-node + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: {{ .Release.Name }}-node + template: + metadata: + labels: + app: {{ .Release.Name }}-node + spec: + serviceAccountName: {{ .Release.Name }}-node + hostNetwork: true + containers: + - name: wekafs + securityContext: + privileged: true + image: {{ .Values.images.csidriver }}:v{{ .Values.images.csidriverTag }} + imagePullPolicy: Always + args: + - "--v=5" + - "--drivername=$(CSI_DRIVER_NAME)" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--dynamic-path=$(CSI_DYNAMIC_PATH)" + - "--csimode=$(X_CSI_MODE)" + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + env: + - name: CSI_DRIVER_NAME + value: {{ required "Provide CSI Driver Name" .Values.csiDriverName }} + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + 
valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_DYNAMIC_PATH + value: {{ required "Provide CSI Driver Dynamic Volume Creation Path" .Values.dynamicProvisionPath }} + - name: X_CSI_MODE + value: node + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /var/lib/csi-wekafs-data + name: csi-data-dir + - mountPath: /dev + name: dev-dir + + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: {{ required "Provide Liveness Probe image." .Values.images.livenessprobesidecar }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--health-port=$(HEALTH_PORT)" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + - name: HEALTH_PORT + value: "9898" + + - name: csi-registrar + image: {{ required "Provide the csi node registrar sidecar container image." .Values.images.registrarsidecar }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-wekafs/csi.sock" + securityContext: + privileged: true + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - mountPath: /var/lib/csi-wekafs-data + name: csi-data-dir + {{- with .Values.nodePluginTolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-wekafs + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-wekafs-data/ + type: DirectoryOrCreate + name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.schema.json b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.schema.json new file mode 100644 index 000000000..763641204 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.schema.json @@ -0,0 +1,104 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "controllerPluginTolerations": { + "type": "array", + "description": "CSI Controller component tolerations", + "items": { + "type": "object", + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + } + } + } + }, + "csiDriverName": { + "type": "string", + "description": "Override name of CSI driver" + }, + "csiDriverVersion": { + "type": "string" + }, + "dynamicProvisionPath": { + "type": "string", + "description": "Root path of dynamic volumes (relative to filesystem root)" + }, + "globalPluginTolerations": { + "type": "array", + "description": "Global tolerations for all plugin components", + "items": { + "type": "object", + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + } + } + } + }, + "images": { + "type": "object", + "properties": { + "attachersidecar": { + "type": "string", + "description": "Path to Docker image of attachersidecar container" + }, + "csidriver": { + "type": "string", + "description": "Path to Docker image of csidriver container" + }, + "csidriverTag": { + "type": "string", + "description": "Path to Docker image of csidriverTag container" + }, + "livenessprobesidecar": { + "type": "string", + "description": "Path to Docker image of livenessprobesidecar container" + }, + "provisionersidecar": { + "type": "string", + "description": "Path to Docker image of provisionersidecar container" + }, + "registrarsidecar": { + "type": "string", + "description": "Path to Docker image of registrarsidecar container" + }, + "resizersidecar": { + "type": "string", + "description": "Path to Docker image of resizersidecar container" + } + } + }, + "nodePluginTolerations": { + "type": "array", + "description": "CSI Node component tolerations", + "items": { + "type": "object", + "properties": { + "effect": { + "type": "string" + }, + "key": { + "type": "string" + }, + "operator": { + "type": "string" + } + } + } + } + } +} diff --git a/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.yaml b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.yaml new file mode 100644 index 000000000..caf492e31 --- /dev/null +++ b/charts/csi-wekafs/csi-wekafsplugin/0.6.400/values.yaml @@ -0,0 +1,40 @@ +# Default values for csi-wekafsplugin. 
+ +# directory in root of file system where dynamic volumes are provisioned +dynamicProvisionPath: "csi-volumes" + +# name of the driver +# note same name will be used for provisioner name +csiDriverName : "csi.weka.io" +csiDriverVersion : &csiDriverVersion "0.6.4" + +# Image paths +images: + # "images.liveness-probe-sidecar" defines the container image used for the csi liveness probe sidecar + livenessprobesidecar: quay.io/k8scsi/livenessprobe:v1.1.0 + + # "images.attacher-sidercar" defines the container image used for the csi attacher sidecar + attachersidecar: quay.io/k8scsi/csi-attacher:v3.0.0-rc1 + + # "images.provisioner-sidercar" defines the container image used for the csi provisioner sidecar + provisionersidecar: quay.io/k8scsi/csi-provisioner:v1.6.0 + + # "images.registrar-sidercar" defines the container images used for the csi registrar sidercar + registrarsidecar: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 + + # "images.resizer-sidercar" defines the container image used for the csi provisioner sidecar + resizersidecar: quay.io/k8scsi/csi-resizer:v0.5.0 + + # images.csidriver defines csidriver image used for external provisioning + csidriver: quay.io/weka.io/csi-wekafs + csidriverTag: *csiDriverVersion + +globalPluginTolerations: &globalPluginTolerations + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + +controllerPluginTolerations: *globalPluginTolerations + +nodePluginTolerations: *globalPluginTolerations + diff --git a/charts/datadog/datadog/2.4.200/.helmignore b/charts/datadog/datadog/2.4.200/.helmignore new file mode 100644 index 000000000..46fd89965 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for Kubernetes +OWNERS diff --git a/charts/datadog/datadog/2.4.200/CHANGELOG.md b/charts/datadog/datadog/2.4.200/CHANGELOG.md new file mode 100644 index 000000000..fe4209075 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/CHANGELOG.md @@ -0,0 +1,328 @@ +# Datadog changelog + +## Changelog is now available through Git history/GitHub tags, previous tags kept as reference + +## 2.3.41 + +* Fix issue with Kubernetes <= 1.14 and Cluster Agent's External Metrics Provider (must be 443) + +## 2.3.40 + +* Update documentation for resource requests & limits default values. + +## 2.3.39 + +* Propagate `datadog.checksd` to the clusterchecks runner to support custom checks there. + +## 2.3.38 + +* Add support of DD\_CONTAINER\_{INCLUDE,EXCLUDE}\_{METRICS,LOGS} + +## 2.3.37 + +* Add NET\_BROADCAST capability + +## 2.3.36 + +* Bump default Agent version to `7.21.1` + +## 2.3.35 + +* Add support for configuring the Datadog Admission Controller + +## 2.3.34 + +* Add support for scaling based on `DatadogMetric` CRD + +## 2.3.33 + +* Create new `datadog.podSecurity.securityContext` field to fix windows agent daemonset config. + +## 2.3.32 + +* Always add os in nodeSelector based on `targetSystem` + +## 2.3.31 + +* Fixed daemonset template for go 1.14 + +## 2.3.29 + +* Change the default port for the Cluster Agent's External Metrics Provider + from 443 to 8443. 
+* Document usage of `clusterAgent.env` + +## 2.3.28 + +* fix daemonset template generation if `datadog.securityContext` is set to `nil` + +## 2.3.27 + +* add systemProbe.collectDNSStats option + +## 2.3.26 + +* fix PodSecurityContext configuration + +## 2.3.25 + +* Use directly .env var YAML block for all agents (was already the case for Cluster Agent) + +## 2.3.24 + +* Allow enabling Orchestrator Explorer data collection from the process-agent + +## 2.3.23 + +* Add the possibility to create a `PodSecurityPolicy` or a `SecurityContextConstraints` (Openshift) for the Agent's Daemonset Pods. + +## 2.3.22 + +* Remove duplicate imagePullSecrets +* Fix DataDog location to useConfigMap in docs +* Adding explanation for metricsProvider.enabled + +## 2.3.21 + +* Fix additional default values in `values.yaml` to prevent errors with Helm 2.x + +## 2.3.20 + +* Fix process-agent <> system-probe communication + +## 2.3.19 + +* Fix the container-trace-agent.yaml template creates invalid yaml when `useSocketVolume` is enabled. + +## 2.3.18 + +* Support arguments in the cluster-agent container `command` value + +## 2.3.17 + +* grammar edits to datadog helm docs! +* Typo in log config + +## 2.3.16 + +* Add parameter `clusterChecksRunner.rbac.serviceAccountAnnotations` for specifying annotations for dedicated ServiceAccount for Cluster Checks runners. +* Add parameters `clusterChecksRunner.volumes` and `clusterChecksRunner.volumeMounts` that can be used for providing a secret backend to Cluster Checks runners. + +## 2.3.15 + +* Mount kernel headers in system-probe container +* Fix the mount of the `system-probe` socket in core agent +* Add parameters to enable eBPF based checks + +## 2.3.14 + +* Allow overriding the `command` to run in the cluster-agent container + +## 2.3.13 + +* Use two distinct health endpoints for liveness and readiness probes. + +## 2.3.12 + +* Fix endpoints checks scheduling between agent and cluster check runners +* Cluster Check Runner now runs without s6 (similar to other agents) + +## 2.3.11 + +* Bump the default version of the agent docker images + +## 2.3.10 + +* Add dnsConfig options to all containers + +## 2.3.9 + +* Add `clusterAgent.podLabels` variable to add labels to the Cluster Agent Pod(s) + +## 2.3.8 + +* Fix templating errors when `clusterAgent.datadog_cluster_yaml` is being used. + +## 2.3.7 + +* Fix an agent warning at startup because of a deprecated parameter + +## 2.3.6 + +* Add `affinity` parameter in `values.yaml` for cluster agent deployment + +## 2.3.5 + +* Add `DD_AC_INCLUDE` and `DD_AC_EXCLUDE` to all containers +* Add "Unix Domain Socket" support in trace-agent +* Add new parameter to specify the dogstatsd socket path on the host +* Fix typos in values.yaml +* Update "tags:" example in values.yaml +* Add "rate_limit_queries_*" in the datadog.cluster-agent prometheus check configuration + +## 2.3.4 + +* Fix default values in `values.yaml` to prevent warnings with Helm 2.x + +## 2.3.3 + +* Allow pre-release versions as docker image tag + +## 2.3.2 + +* Update the DCA RBAC to allow it to create events in the HPA + +## 2.3.1 + +* Update the example for `datadog.securityContext` + +## 2.3.0 + +* Mount the directory containing the CRI socket instead of the socket itself + This is to handle the cases where the docker daemon is restarted. + In this case, the docker daemon will recreate its docker socket and, + if the container bind-mounted directly the socket, the container would + still have access to the old socket instead of the one of the new docker + daemon. 
+ ⚠ This version of the chart requires an agent image 7.19.0 or more recent + +## 2.2.12 + +* Adding resources for `system-probe` init container + +## 2.2.11 + +* Add documentations around secret management in the datadog helm chart. It is to upstream + requested changes in the IBM charts repository: https://github.com/IBM/charts/pull/690#discussion_r411702458 +* update `kube-state-metrics` dependency +* uncomment every values.yaml parameters for IBM chart compliancy + +## 2.2.10 + +* Remove `kubeStateMetrics` section from `values.yaml` as not used anymore + +## 2.2.9 + +* Fixing variables description in README and Migration documentation (#22031) +* Avoid volumes mount conflict between `system-probe` and `logs` volumes in the `agent`. + +## 2.2.8 + +* Mount `system-probe` socket in `agent` container when system-probe is enabled + +## 2.2.7 + +* Add "Cluster-Agent" `Event` `create` RBAC permission + +## 2.2.6 + +* Ensure the `trace-agent` computes the same hostname as the core `agent`. + by giving it access to all the elements that might be used to compute the hostname: + the `DD_CLUSTER_NAME` environment variable and the docker socket. + +## 2.2.5 + +* Fix RBAC + +## 2.2.4 + +* Move several EnvVars to `common-env-vars` to be accessible by the `trace-agent` #21991. +* Fix discrepancies migration-guide and readme reporded in #21806 and #21920. +* Fix EnvVars with integer value due to yaml. serialization, reported by #21853. +* Fix .Values.datadog.tags encoding, reported by #21663. +* Add Checksum to `xxx-cluster-agent-config` config map, reported by #21622 and contribution #21656. + +## 2.2.3 + +* Fix `datadog.dockerOrCriSocketPath` helper #21992 + +## 2.2.2 + +* Fix indentation for `clusterAgent.volumes`. + +## 2.2.1 + +* Updating `agents.useConfigMap` and `agents.customAgentConfig` parameter descriptions in the chart and main readme. + +## 2.2.0 + +* Add Windows support +* Update documentation to reflect some changes that were made default +* Enable endpoint checks by default in DCA/Agent + +## 2.1.2 + +* Fixed a bug where `DD_LEADER_ELECTION` was not set in the config init container, leading to a failure to adapt +config to this environment variable. + +## 2.1.1 + +* Add option to enable WPA in the Cluster Agent. + +## 2.1.0 + +* Changed the default for `processAgent.enabled` to `true`. + +## 2.0.14 + +* Fixed a bug where the `trace-agent` runs in the same container as `dd-agent` + +## 2.0.13 + +* Fix `system-probe` startup on latest versions of containerd. + Here is the error that this change fixes: + ``` State: Waiting + Reason: CrashLoopBackOff + Last State: Terminated + Reason: StartError + Message: failed to create containerd task: OCI runtime create failed: container_linux.go:349: starting container process caused "close exec fds: ensure /proc/self/fd is on procfs: operation not permitted": unknown + Exit Code: 128 + ``` + +## 2.0.11 + +* Add missing syscalls in the `system-probe` seccomp profile + +## 2.0.10 + +* Do not enable the `cri` check when running on a `docker` setup. + +## 2.0.7 + +* Pass expected `DD_DOGSTATSD_PORT` to datadog-agent rather than invalid `DD_DOGSTATD_PORT` + +## 2.0.6 + +* Introduces `procesAgent.processCollection` to correctly configure `DD_PROCESS_AGENT_ENABLED` for the process agent. + +## 2.0.5 + +* Honor the `datadog.env` parameter in all containers. + +## 2.0.4 + +* Honor the image pull policy in init containers. +* Pass the `DD_CRI_SOCKET_PATH` environment variable to the config init container so that it can adapt the agent config based on the CRI. 
+ +## 2.0.3 + +* Fix templating error when `agents.useConfigMap` is set to true. +* Add DD\_APM\_ENABLED environment variable to trace agent container. + +## 2.0.2 + +* Revert the docker socket path inside the agent container to its standard location to fix #21223. + +## 2.0.1 + +* Add parameters `datadog.logs.enabled` and `datadog.logs.containerCollectAll` to replace `datadog.logsEnabled` and `datadog.logsConfigContainerCollectAll`. +* Update the migration document link in the `Readme.md`. + +## 2.0.0 + +* Remove Datadog agent deployment configuration. +* Clean up resource labels to fit the recommended labels. +* Clean up useless or unused values parameters. +* Each component now has its own RBAC configuration (create, configuration). +* Simplify the values configuration for the container runtime socket. +* `nameOverride` and `fullnameOverride` are now optional in values.yaml. diff --git a/charts/datadog/datadog/2.4.200/Chart.yaml b/charts/datadog/datadog/2.4.200/Chart.yaml new file mode 100644 index 000000000..3a8c3bf85 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: datadog +apiVersion: v1 +appVersion: "7" +description: Datadog Agent +home: https://www.datadoghq.com +icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png +keywords: +- monitoring +- alerting +- metric +maintainers: +- email: support@datadoghq.com + name: Datadog +name: datadog +sources: +- https://app.datadoghq.com/account/settings#agent/kubernetes +- https://github.com/DataDog/datadog-agent +version: 2.4.200 diff --git a/charts/datadog/datadog/2.4.200/README.md b/charts/datadog/datadog/2.4.200/README.md new file mode 100644 index 000000000..d36ee5f2e --- /dev/null +++ b/charts/datadog/datadog/2.4.200/README.md @@ -0,0 +1,481 @@ +# Datadog + +[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). + +Datadog [offers two variants](https://hub.docker.com/r/datadog/agent/tags/); switch to a `-jmx` tag if you need to run JMX/Java integrations. The chart also supports running [the standalone dogstatsd image](https://hub.docker.com/r/datadog/dogstatsd/tags/). + +See the [Datadog JMX integration](https://docs.datadoghq.com/integrations/java/) to learn more. + +## How to use the Datadog Helm repository + +You need to add this repository to your Helm repositories: + +``` +helm repo add datadog https://helm.datadoghq.com +helm repo update +``` + +## Prerequisites + +Kubernetes 1.4+ or OpenShift 3.4+. Note that: + +- The Datadog Agent supports Kubernetes 1.4+ +- The Datadog chart's defaults are tailored to Kubernetes 1.7.6+, see [Datadog Agent legacy Kubernetes versions documentation](https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#legacy-kubernetes-versions) for adjustments you might need to make for older versions + +## Quick start + +By default, the Datadog Agent runs in a DaemonSet. It can alternatively run inside a Deployment for special use cases.
+ +**Note:** Simultaneous DaemonSet + Deployment installation within a single release will be deprecated in a future version, requiring two releases to achieve this. + +### Installing the Datadog Chart + +To install the chart with the release name `<RELEASE_NAME>`, retrieve your Datadog API key from your [Agent Installation Instructions](https://app.datadoghq.com/account/settings#agent/kubernetes) and run: + +```bash +helm install --name <RELEASE_NAME> \ + --set datadog.apiKey=<DATADOG_API_KEY> datadog/datadog +``` + +By default, this Chart creates a Secret and puts an API key in that Secret. +However, you can use a manually created secret by setting the `datadog.apiKeyExistingSecret` value. After a few minutes, you should see hosts and metrics being reported in Datadog. + +#### Create and provide a secret that contains your Datadog API Key + +To create a secret that contains your Datadog API key, replace `<DATADOG_API_KEY>` below with the API key for your organization. This secret is used in the manifest to deploy the Datadog Agent. + +```bash +DATADOG_SECRET_NAME=datadog-secret +kubectl create secret generic $DATADOG_SECRET_NAME --from-literal api-key="<DATADOG_API_KEY>" --namespace="default" +``` + +**Note**: This creates a secret in the default namespace. If you are in a custom namespace, update the namespace parameter of the command before running it. + +Now, the installation command references the secret: + +```bash +helm install --name <RELEASE_NAME> \ + --set datadog.apiKeyExistingSecret=$DATADOG_SECRET_NAME datadog/datadog +``` + +**Note**: Provide a secret for the application key (AppKey) using the `datadog.appKeyExistingSecret` chart variable. + +### Enabling the Datadog Cluster Agent + +Read about the Datadog Cluster Agent in the [official documentation](https://docs.datadoghq.com/agent/kubernetes/cluster/). + +Run the following if you want to deploy the chart with the Datadog Cluster Agent: + +```bash +helm install --name datadog-monitoring \ + --set datadog.apiKey=<DATADOG_API_KEY> \ + --set datadog.appKey=<DATADOG_APP_KEY> \ + --set clusterAgent.enabled=true \ + --set clusterAgent.metricsProvider.enabled=true \ + datadog/datadog +``` + +**Note**: Specifying `clusterAgent.metricsProvider.enabled=true` enables the External Metrics Server. +If you want to learn to use this feature, you can check out this [Datadog Cluster Agent walkthrough](https://github.com/DataDog/datadog-agent/blob/master/docs/cluster-agent/CUSTOM_METRICS_SERVER.md). + +Leader election is enabled by default in the chart for the Cluster Agent. Only the Cluster Agent(s) participate in the election, in case you have several replicas configured (using `clusterAgent.replicas`). + +#### Cluster Agent Token + +You can specify the Datadog Cluster Agent token used to secure the communication between the Cluster Agent(s) and the Agents with `clusterAgent.token`. + +**If you don't specify a token, a random one is generated at each deployment so you must use `--recreate-pods` to ensure all pods use the same token.** See the [Datadog Chart notes](https://github.com/helm/charts/blob/57d3030941ad2ec2d6f97c86afdf36666658a884/datadog/datadog/templates/NOTES.txt#L49-L59) to learn more. + +### Upgrading + +#### From 1.x to 2.x + +⚠️ Migrating from 1.x to 2.x requires a manual action. + +The `datadog` chart has been refactored to regroup the `values.yaml` parameters in a more logical way. +Please follow the [migration guide](https://github.com/helm/charts/blob/master/datadog/datadog/docs/Migration_1.x_to_2.x.md) to update your `values.yaml` file.
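As a small editor's illustration of that regrouping (a sketch based on the 2.0.1 changelog entry above, not part of the chart's packaged documentation), the flat 1.x log-collection flags map onto the nested 2.x `datadog.logs` block; the 1.x key names shown here are assumed from that changelog entry:

```yaml
# Before (1.x values) -- legacy flat keys, per the 2.0.1 changelog entry
datadog:
  logsEnabled: true
  logsConfigContainerCollectAll: true
---
# After (2.x values) -- matches the "Enabling Log Collection" section below
datadog:
  logs:
    enabled: true
    containerCollectAll: true
```

The full parameter mapping is covered by the migration guide linked above.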
+ +#### From 1.19.0 onwards + +Version `1.19.0` introduces the use of the release name as the full name if it contains the chart name (`datadog` in this case). +E.g. with a release name of `datadog`, this renames the `DaemonSet` from `datadog-datadog` to `datadog`. +The suggested approach is to delete the release and reinstall it. + +#### From 1.0.0 onwards + +Starting with version 1.0.0, this chart does not support deploying Agent 5.x anymore. If you cannot upgrade to Agent 6.x or later, you can use a previous version of the chart by calling helm install with `--version 0.18.0`. + +See [0.18.1's README](https://github.com/helm/charts/blob/847f737479bb78d89f8fb650db25627558fbe1f0/datadog/datadog/README.md) to see which options were supported at the time. + +### Uninstalling the Chart + +To uninstall/delete the `<RELEASE_NAME>` deployment: + +```bash +helm delete --purge <RELEASE_NAME> +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +As a best practice, a YAML file that specifies the values for the chart parameters should be provided to configure the chart: + +1. **Copy the default [`datadog-values.yaml`](values.yaml) value file.** +2. Set the `apiKey` parameter with your [Datadog API key](https://app.datadoghq.com/account/settings#api). +3. Upgrade the Datadog Helm chart with the new `datadog-values.yaml` file: + +```bash +helm upgrade -f datadog-values.yaml datadog/datadog --recreate-pods +``` + +See the [All configuration options](#all-configuration-options) section to discover all possibilities offered by the Datadog chart. + +### Enabling Log Collection + +Update your [datadog-values.yaml](values.yaml) file with the following log collection configuration: + +```yaml +datadog: + # (...) + logs: + enabled: true + containerCollectAll: true +``` + +then upgrade your Datadog Helm chart: + +```bash +helm upgrade -f datadog-values.yaml datadog/datadog --recreate-pods +``` + +### Enabling Process Collection + +Update your [datadog-values.yaml](values.yaml) file with the process collection configuration: + +```yaml +datadog: + # (...) + processAgent: + enabled: true + processCollection: true +``` + +then upgrade your Datadog Helm chart: + +```bash +helm upgrade -f datadog-values.yaml datadog/datadog --recreate-pods +``` + +### Enabling System Probe Collection + +The system-probe agent only runs in a dedicated container environment. Update your [datadog-values.yaml](values.yaml) file with the system-probe collection configuration:
+ +The keys for `datadog.confd` and `datadog.checksd` should mirror the content found in their respective ConfigMaps. Update your [datadog-values.yaml](values.yaml) file with the check configurations: + +```yaml +datadog: + confd: + redisdb.yaml: |- + ad_identifiers: + - redis + - bitnami/redis + init_config: + instances: + - host: "%%host%%" + port: "%%port%%" + jmx.yaml: |- + ad_identifiers: + - openjdk + instance_config: + instances: + - host: "%%host%%" + port: "%%port_0%%" + redisdb.yaml: |- + init_config: + instances: + - host: "outside-k8s.example.com" + port: 6379 +``` + +then upgrade your Datadog Helm chart: + +```bash +helm upgrade -f datadog-values.yaml datadog/datadog --recreate-pods +``` + +For more details, please refer to [the documentation](https://docs.datadoghq.com/agent/kubernetes/integrations/). + +### Kubernetes Labels and Annotations + +To map Kubernetes node labels and pod labels and annotations to Datadog tags, provide a dictionary with kubernetes labels/annotations as keys and Datadog tags key as values in your [datadog-values.yaml](values.yaml) file: + +```yaml +nodeLabelsAsTags: + beta.kubernetes.io/instance-type: aws_instance_type + kubernetes.io/role: kube_role +``` + +```yaml +podAnnotationsAsTags: + iam.amazonaws.com/role: kube_iamrole +``` + +```yaml +podLabelsAsTags: + app: kube_app + release: helm_release +``` + +then upgrade your Datadog Helm chart: + +```bash +helm upgrade -f datadog-values.yaml datadog/datadog --recreate-pods +``` + +### CRI integration + +As of the version 6.6.0, the Datadog Agent supports collecting metrics from any container runtime interface used in your cluster. Configure the location path of the socket with `datadog.criSocketPath`; default is the Docker container runtime socket. To deactivate this support, you just need to unset the `datadog.criSocketPath` setting. +Standard paths are: + +- Docker socket: `/var/run/docker.sock` +- Containerd socket: `/var/run/containerd/containerd.sock` +- Cri-o socket: `/var/run/crio/crio.sock` + +## All configuration options + +The following table lists the configurable parameters of the Datadog chart and their default values. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```bash +helm install --name \ + --set datadog.apiKey=,datadog.logLevel=DEBUG \ + datadog/datadog +``` + +| Parameter | Description | Default | +|--------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------| +| `targetSystem` | Target OS of this installation (supported: `linux`, `windows`) | `linux` | +| `datadog.apiKey` | Your Datadog API key | `nil` You must provide your own key | +| `datadog.apiKeyExistingSecret` | If set, use the secret with a provided name instead of creating a new one | `nil` | +| `datadog.appKey` | Datadog APP key required to use metricsProvider | `nil` You must provide your own key | +| `datadog.appKeyExistingSecret` | If set, use the secret with a provided name instead of creating a new one | `nil` | +| `agents.image.repository` | The image repository to pull from | `datadog/agent` | +| `agents.image.tag` | The image tag to pull | `7.19.0` | +| `agents.image.doNotCheckTag` | By default, the helm chart will check that the version provided in `agents.image.tag` is superior to the minimal version requested by the chart. If `doNotCheckTag` is explicitly set to `true`, this check is skipped. This is useful for custom tags that are not respecting semantic versioning. | `false` | +| `agents.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `agents.image.pullSecrets` | Image pull secrets | `nil` | +| `nameOverride` | Override name of app | `""` | +| `fullnameOverride` | Override full name of app | `""` | +| `agents.rbac.create` | If true, create & use RBAC resources | `true` | +| `agents.rbac.serviceAccountName` | existing ServiceAccount to use (ignored if rbac.create=true) | `default` | +| `datadog.site` | Site ('datadoghq.com' or 'datadoghq.eu') | `nil` | +| `datadog.dd_url` | Datadog intake server | `nil` | +| `datadog.env` | Additional Datadog environment variables | `nil` | +| `datadog.logLevel` | Agent log verbosity (possible values: trace, debug, info, warn, error, critical, and off) | `INFO` | +| `datadog.logs.enabled` | Enable log collection | `nil` | +| `datadog.logs.containerCollectAll` | Collect logs from all containers | `nil` | +| `datadog.logs.containerCollectUsingFiles` | Collect container logs from files on disk instead of container runtime API | `true` | +| `datadog.apm.enabled` | Enable tracing from the host | `false` | +| `datadog.apm.port` | Used to override the default agent APM Port | `8126` | +| `datadog.apm.useSocketVolume` | Enable APM over Unix Domain Socket | `False` | +| `datadog.apm.socketPath` | Custom path to the socket, has to be located in the `/var/run/datadog/` folder path | `/var/run/datadog/apm.socket` | +| `datadog.apm.hostPath` | host directory that contains the trace-agent socket path | `/var/run/datadog/` | +| `datadog.clusterChecks.enabled` | Enable Cluster Checks on both the Cluster Agent and the Agent daemonset | `false` | +| `datadog.processAgent.enabled` | Enable live process and container monitoring agent. Possible values: `true` enable process-agent, `false` disable process-agent | `true` | +| `datadog.processAgent.processCollection` | Enable live process collection. 
Possible values: `true` enable process collection, `false` disable process collection | `false` | +| `datadog.checksd` | Additional custom checks as python code | `nil` | +| `datadog.confd` | Additional check configurations (static and Autodiscovery) | `nil` | +| `datadog.dockerSocketPath` | Path to the docker socket | `/var/run/docker.sock` | +| `datadog.criSocketPath` | Path to the container runtime socket (default is Docker runtime) | `nil` | +| `datadog.tags` | Set host tags | `nil` | +| `datadog.dogstatsd.originDetection` | Enable origin detection for container tagging | `False` | +| `datadog.dogstatsd.port` | Used to override the default agent DogStatsD Port | `8125` | +| `datadog.dogstatsd.useHostPID` | If true, use the host's PID namespace | `nil` | +| `datadog.dogstatsd.useHostPort` | If true, use the same ports for both host and container | `nil` | +| `datadog.dogstatsd.nonLocalTraffic` | Enable statsd reporting from any external ip | `False` | +| `datadog.dogstatsd.useSocketVolume` | Enable dogstatsd over Unix Domain Socket | `False` | +| `datadog.dogstatsd.socketPath` | Custom path to the socket, has to be located in the `/var/run/datadog/` folder path | `/var/run/datadog/dsd.socket` | +| `datadog.dogstatsd.hostPath` | host directory that contains the dogstatsd socket | `/var/run/datadog/` | +| `datadog.nodeLabelsAsTags` | Kubernetes Node Labels to Datadog Tags mapping | `nil` | +| `datadog.podAnnotationsAsTags` | Kubernetes Annotations to Datadog Tags mapping | `nil` | +| `datadog.podLabelsAsTags` | Kubernetes Labels to Datadog Tags mapping | `nil` | +| `datadog.securityContext` | Allows you to overwrite the default securityContext applied to the container | `nil` | +| `datadog.acInclude` | (Deprecated) Include containers based on image name | `nil` | +| `datadog.acExclude` | (Deprecated) Exclude containers based on image name | `nil` | +| `datadog.containerInclude` | Include containers based on image name, container name or kubernetes namespace | `nil` | +| `datadog.containerExclude` | Exclude containers based on image name, container name or kubernetes namespace | `nil` | +| `datadog.containerIncludeMetrics` | Include containers for metrics collection based on image name, container name or kubernetes namespace | `nil` | +| `datadog.containerExcludeMetrics` | Exclude containers from metrics based on image name, container name or kubernetes namespace | `nil` | +| `datadog.containerIncludeLogs` | Include containers for logs collection based on image name, container name or kubernetes namespace | `nil` | +| `datadog.containerExcludeLogs` | Exclude containers from logs based on image name, container name or kubernetes namespace | `nil` | +| `datadog.systemProbe.enabled` | enable system probe collection | `false` | +| `datadog.systemProbe.seccomp` | Apply an ad-hoc seccomp profile to system-probe to restrict its privileges | `localhost/system-probe` | +| `datadog.systemProbe.seccompRoot` | Seccomp root directory for system-probe | `/var/lib/kubelet/seccomp` | +| `datadog.systemProbe.debugPort` | The port to expose pprof and expvar for system-probe agent, it is not enabled if the value is set to 0 | `0` | +| `datadog.systemProbe.enableConntrack` | If true, system-probe connects to the netlink/conntrack subsystem to add NAT information to connection data. 
Ref: http://conntrack-tools.netfilter.org/ | `true` | +| `datadog.systemProbe.bpfDebug` | If true, system-probe writes debug logs to /sys/kernel/debug/tracing/trace_pipe | `false` | +| `datadog.systemProbe.apparmor` | Apparmor profile for system-probe | `unconfined` | +| `datadog.systemProbe.enableTCPQueueLength` | Enable the TCP queue length eBPF-based check | `false` | +| `datadog.systemProbe.enableOOMKill` | Enable the OOM kill eBPF-based check | `false` | +| `datadog.orchestratorExplorer.enabled` | Enable the Orchestrator Explorer data collection | `false` | +| `agents.podAnnotations` | Annotations to add to the DaemonSet's Pods | `nil` | +| `agents.podLabels` | labels to add to each pod | `nil` | +| `agents.tolerations` | List of node taints to tolerate (requires Kubernetes >= 1.6) | `nil` | +| `agents.nodeSelector` | Node selectors | `nil` | +| `agents.affinity` | Node affinities | `nil` | +| `agents.useHostNetwork` | If true, use the host's network | `nil` | +| `agents.dnsConfig` | If set, configure dnsConfig options in datadog agent containers | `nil` | +| `agents.containers.agent.env` | Additional list of environment variables to use in the agent container | `nil` | +| `agents.containers.agent.logLevel` | Agent log verbosity | `INFO` | +| `agents.containers.agent.resources.limits.cpu` | CPU resource limits for the agent container | not set | +| `agents.containers.agent.resources.requests.cpu` | CPU resource requests for the agent container | not set | +| `agents.containers.agent.resources.limits.memory` | Memory resource limits for the agent container | not set | +| `agents.containers.agent.resources.requests.memory` | Memory resource requests for the agent container | not set | +| `agents.containers.agent.livenessProbe` | Overrides the default liveness probe | http check on /live with port 5555 | +| `agents.containers.agent.readinessProbe` | Overrides the default readiness probe | http check on /ready with port 5555 | +| `agents.containers.processAgent.env` | Additional list of environment variables to use in the process-agent container | `nil` | +| `agents.containers.processAgent.logLevel` | Process agent log verbosity | `INFO` | +| `agents.containers.processAgent.resources.limits.cpu` | CPU resource limits for the process-agent container | `100m` | +| `agents.containers.processAgent.resources.requests.cpu` | CPU resource requests for the process-agent container | `100m` | +| `agents.containers.processAgent.resources.limits.memory` | Memory resource limits for the process-agent container | `200Mi` | +| `agents.containers.processAgent.resources.requests.memory` | Memory resource requests for the process-agent container | `200Mi` | +| `agents.containers.traceAgent.env` | Additional list of environment variables to use in the trace-agent container | `nil` | +| `agents.containers.traceAgent.logLevel` | Trace agent log verbosity | `INFO` | +| `agents.containers.traceAgent.resources.limits.cpu` | CPU resource limits for the trace-agent container | `100m` | +| `agents.containers.traceAgent.resources.requests.cpu` | CPU resource requests for the trace-agent container | `100m` | +| `agents.containers.traceAgent.resources.limits.memory` | Memory resource limits for the trace-agent container | `200Mi` | +| `agents.containers.traceAgent.resources.requests.memory` | Memory resource requests for the trace-agent container | `200Mi` | +| `agents.containers.systemProbe.env` | Additional list of environment variables to use in the system-probe container | `nil` | +| 
`agents.containers.systemProbe.logLevel` | System probe log verbosity | `INFO` | +| `agents.containers.systemProbe.resources.limits.cpu` | CPU resource limits for the system-probe container | `100m` | +| `agents.containers.systemProbe.resources.requests.cpu` | CPU resource requests for the system-probe container | `100m` | +| `agents.containers.systemProbe.resources.limits.memory` | Memory resource limits for the system-probe container | `200Mi` | +| `agents.containers.systemProbe.resources.requests.memory` | Memory resource requests for the system-probe container | `200Mi` | +| `agents.containers.initContainers.resources.limits.cpu` | CPU resource limits for the init containers container | not set | +| `agents.containers.initContainers.resources.requests.cpu` | CPU resource requests for the init containers container | not set | +| `agents.containers.initContainers.resources.limits.memory` | Memory resource limits for the init containers container | not set | +| `agents.containers.initContainers.resources.requests.memory` | Memory resource requests for the init containers container | not set | +| `agents.priorityClassName` | Which Priority Class to associate with the daemonset | `nil` | +| `agents.useConfigMap` | Configures a configmap to provide the agent configuration. Use this in combination with the `agent.customAgentConfig` parameter. | `false` | +| `agents.customAgentConfig` | Specify custom contents for the datadog agent config (datadog.yaml). Note the `agents.useConfigMap` parameter needs to be set to `true` for this parameter to be taken into account. | `{}` | +| `agents.updateStrategy` | Which update strategy to deploy the daemonset | RollingUpdate with 10% maxUnavailable | +| `agents.volumes` | Additional volumes for the daemonset or deployment | `nil` | +| `agents.volumeMounts` | Additional volumeMounts for the daemonset or deployment | `nil` | +| `agents.podSecurity.podSecurityPolicy.create` | If true, create a PodSecurityPolicy resource for the Agent's Pods. Supported only for Linux agent's daemonset. | `False` | +| `agents.podSecurity.securityContextConstraints.create` | If true, create a SecurityContextConstraints resource for the Agent's Pods. Supported only for Linux agent's daemonset. | `False` | +| `datadog.podSecurity.securityContext` | Allows you to overwrite the default securityContext applied to the container | default security context configuration | +| `agents.podSecurity.privileged` | If true, allowed privileged containers | `False` | +| `agents.podSecurity.capabilites` | list of allowed capabilities | `[SYS_ADMIN, SYS_RESOURCE, SYS_ADMIN, IPC_LOCK]` | +| `agents.podSecurity.volumes` | list of allowed volumes types | `[configMap,downwardAPI,emptyDir,ostPath,secret]` | +| `agents.podSecurity.seccompProfiles` | List of allowed seccomp profiles | `["*"]` | +| `agents.podSecurity.apparmorProfiles` | List of allowed apparmor profiles | `["*"]` | +| `datadog.leaderElection` | Enable the leader Election feature | `false` | +| `datadog.leaderLeaseDuration` | The duration for which a leader stays elected. | 60 sec, 15 if Cluster Checks enabled | +| `datadog.collectEvents` | Enable Kubernetes event collection. Requires leader election. | `false` | +| `datadog.kubeStateMetricsEnabled` | If true, create kube-state-metrics | `true` | +| `clusterAgent.enabled` | Use the cluster-agent for cluster metrics (Kubernetes 1.10+ only) | `false` | +| `clusterAgent.token` | A cluster-internal secret for agent-to-agent communication. 
Must be 32+ characters a-zA-Z | Generates a random value | +| `clusterAgent.tokenExistingSecret` | If set, use the secret with a provided name instead of creating a new one | `nil` | +| `clusterAgent.image.repository` | The image repository for the cluster-agent | `datadog/cluster-agent` | +| `clusterAgent.image.tag` | The image tag to pull | `1.2.0` | +| `clusterAgent.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `clusterAgent.image.pullSecrets` | Image pull secrets | `nil` | +| `clusterAgent.command` | Override the default command to run in the container | `nil` | +| `clusterAgent.rbac.create` | If true, create & use RBAC resources for cluster agent's pods | `true` | +| `clusterAgent.rbac.serviceAccount` | existing ServiceAccount to use (ignored if rbac.create=true) for cluster agent's pods | `default` | +| `clusterAgent.metricsProvider.enabled` | Enable Datadog metrics as a source for HPA scaling | `false` | +| `clusterAgent.metricsProvider.service.type` | The type of service to use for the clusterAgent metrics server | `ClusterIP` | +| `clusterAgent.metricsProvider.service.port` | The port for service to use for the clusterAgent metrics server (Kubernetes >= 1.15) | `8443` | +| `clusterAgent.metricsProvider.wpaController` | Allows metricsProvider to work with WatermarkPodAutoscaler (requires WPA CRD) | `false` | +| `clusterAgent.metricsProvider.useDatadogMetrics` | Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries (requires DatadogMetric CRD) | `false` | +| `clusterAgent.metricsProvider.createReaderRbac` | Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) | `true` | +| `clusterAgent.env` | Additional Datadog environment variables for the cluster-agent | `nil` | +| `clusterAgent.confd` | Additional check configurations (static and Autodiscovery) | `nil` | +| `clusterAgent.podAnnotations` | Annotations to add to the Cluster Agent Pod(s) | `nil` | +| `clusterAgent.podLabels` | Labels to add to the Cluster Agent Pod(s) | `nil` | +| `clusterAgent.createPodDisruptionBudget` | Enable a pod disruption budget to apply to the Cluster Agent pods | `false` | +| `clusterAgent.priorityClassName` | Name of the priorityClass to apply to the Cluster Agent | `nil` | +| `clusterAgent.nodeSelector` | Node selectors to apply to the Cluster Agent deployment | `nil` | +| `clusterAgent.affinity` | Node affinities to apply to the Cluster Agent deployment | `nil` | +| `clusterAgent.resources.requests.cpu` | CPU resource requests | not set | +| `clusterAgent.resources.limits.cpu` | CPU resource limits | not set | +| `clusterAgent.resources.requests.memory` | Memory resource requests | not set | +| `clusterAgent.resources.limits.memory` | Memory resource limits | not set | +| `clusterAgent.tolerations` | List of node taints to tolerate | `[]` | +| `clusterAgent.healthPort` | Overrides the default health port used by the liveness and readiness endpoint | `5555` | +| `clusterAgent.livenessProbe` | Overrides the default liveness probe | `http check on /live with port 5555` | +| `clusterAgent.readinessProbe` | Overrides the default readiness probe | `http check on /ready with port 5555` | +| `clusterAgent.strategy` | Which update strategy to deploy the cluster-agent | RollingUpdate with 0 maxUnavailable, 1 maxSurge | +| `clusterAgent.useHostNetwork` | If true, use the host's network | `nil` | +| `clusterAgent.dnsConfig` | If set, configure dnsConfig options in datadog cluster agent containers | `nil` | +| `clusterAgent.volumes` | 
Additional volumes for the cluster-agent deployment | `nil` | +| `clusterAgent.volumeMounts` | Additional volumeMounts for the cluster-agent deployment | `nil` | +| `clusterChecksRunner.enabled` | Enable Datadog agent deployment dedicated for running Cluster Checks. It allows having different resources (Request/Limit) for Cluster Checks agent pods. | `false` | +| `clusterChecksRunner.env` | Additional Datadog environment variables for Cluster Checks Deployment | `nil` | +| `clusterChecksRunner.createPodDisruptionBudget` | Enable a pod disruption budget to apply to the Cluster Checks pods | `false` | +| `clusterChecksRunner.resources.requests.cpu` | CPU resource requests | not set | +| `clusterChecksRunner.resources.limits.cpu` | CPU resource limits | not set | +| `clusterChecksRunner.resources.requests.memory` | Memory resource requests | not set | +| `clusterChecksRunner.resources.limits.memory` | Memory resource limits | not set | +| `clusterChecksRunner.nodeSelector` | Node selectors | `nil` | +| `clusterChecksRunner.tolerations` | List of node taints to tolerate | `nil` | +| `clusterChecksRunner.affinity` | Node affinities | avoid running pods on the same node | +| `clusterChecksRunner.livenessProbe` | Overrides the default liveness probe | http check on /live with port 5555 | +| `clusterChecksRunner.readinessProbe` | Overrides the default readiness probe | http check on /ready with port 5555 | +| `clusterChecksRunner.rbac.create` | If true, create & use RBAC resources for clusterchecks agent's pods | `true` | +| `clusterChecksRunner.rbac.dedicated` | If true, use dedicated RBAC resources for clusterchecks agent's pods | `false` | +| `clusterChecksRunner.rbac.serviceAccount` | existing ServiceAccount to use (ignored if rbac.create=true) for clusterchecks agent's pods | `default` | +| `clusterChecksRunner.rbac.serviceAccountAnnotations` | Annotations to add to the ServiceAccount if `clusterChecksRunner.rbac.dedicated` is `true` | `{}` | +| `clusterChecksRunner.strategy` | Which update strategy to deploy the Cluster Checks Deployment | RollingUpdate with 0 maxUnavailable, 1 maxSurge | +| `clusterChecksRunner.volumes` | Additional volumes for the Cluster Checks deployment | `nil` | +| `clusterChecksRunner.volumeMounts` | Additional volumeMounts for the Cluster Checks deployment | `nil` | +| `clusterChecksRunner.dnsConfig` | If set, configure dnsConfig options in datadog cluster agent clusterChecks containers | `nil` | +| `kube-state-metrics.rbac.create` | If true, create & use RBAC resources for kube-state-metrics | `true` | +| `kube-state-metrics.serviceAccount.create` | If true, create & use serviceAccount | `true` | +| `kube-state-metrics.serviceAccount.name` | If not set & create is true, use template fullname | | +| `kube-state-metrics.resources` | Overwrite the default kube-state-metrics container resources (Optional) | | + +## Configuration options for Windows deployments + +Some options above are not working/not available on Windows, here is the list of **unsupported** options: + +| Parameter | Reason | +|------------------------------------------|--------------------------------------------------| +| `datadog.dogstatsd.useHostPID` | Host PID not supported by Windows Containers | +| `datadog.dogstatsd.useSocketVolume` | Unix sockets not supported on Windows | +| `datadog.dogstatsd.socketPath` | Unix sockets not supported on Windows | +| `datadog.processAgent.processCollection` | Unable to access host/other containers processes | +| `datadog.systemProbe.enabled` | System probe is 
not available for Windows | +| `datadog.systemProbe.seccomp` | System probe is not available for Windows | +| `datadog.systemProbe.seccompRoot` | System probe is not available for Windows | +| `datadog.systemProbe.debugPort` | System probe is not available for Windows | +| `datadog.systemProbe.enableConntrack` | System probe is not available for Windows | +| `datadog.systemProbe.bpfDebug` | System probe is not available for Windows | +| `datadog.systemProbe.apparmor` | System probe is not available for Windows | +| `agents.useHostNetwork` | Host network not supported by Windows Containers | diff --git a/charts/datadog/datadog/2.4.200/app-readme.md b/charts/datadog/datadog/2.4.200/app-readme.md new file mode 100644 index 000000000..4adcb1d9f --- /dev/null +++ b/charts/datadog/datadog/2.4.200/app-readme.md @@ -0,0 +1,32 @@ +# Datadog + +[Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). + +Datadog [offers two variants](https://hub.docker.com/r/datadog/agent/tags/); switch to a `-jmx` tag if you need to run JMX/java integrations. The chart also supports running [the standalone dogstatsd image](https://hub.docker.com/r/datadog/dogstatsd/tags/). + +See the [Datadog JMX integration](https://docs.datadoghq.com/integrations/java/) to learn more. + +## Prerequisites + +Kubernetes 1.4+ or OpenShift 3.4+, note that: + +* The Datadog Agent supports Kubernetes 1.3+ +* The Datadog chart's defaults are tailored to Kubernetes 1.7.6+, see [Datadog Agent legacy Kubernetes versions documentation](https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#legacy-kubernetes-versions) for adjustments you might need to make for older versions + +## Quick start + +By default, the Datadog Agent runs in a DaemonSet. It can alternatively run inside a Deployment for special use cases. + +**Note:** simultaneous DaemonSet + Deployment installation within a single release will be deprecated in a future version, requiring two releases to achieve this. + +### Installing the Datadog Chart + +To install the chart with the release name `<RELEASE_NAME>`, retrieve your Datadog API key from your [Agent Installation Instructions](https://app.datadoghq.com/account/settings#agent/kubernetes) and run: + +```bash +helm install --name <RELEASE_NAME> \ + --set datadog.apiKey=<DATADOG_API_KEY> stable/datadog +``` + +By default, this Chart creates a Secret and puts an API key in that Secret. +However, you can use a manually created secret by setting the `datadog.apiKeyExistingSecret` value. After a few minutes, you should see hosts and metrics being reported in Datadog. diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/.helmignore b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/Chart.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/Chart.yaml new file mode 100644 index 000000000..4ef688395 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 1.9.7 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: jose@armesto.net + name: fiunchinho +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 +- email: manuel@rueg.eu + name: mrueg +name: kube-state-metrics +sources: +- https://github.com/kubernetes/kube-state-metrics/ +version: 2.8.11 diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/OWNERS b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/OWNERS new file mode 100644 index 000000000..6ffd97d74 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/OWNERS @@ -0,0 +1,8 @@ +approvers: +- fiunchinho +- tariq1890 +- mrueg +reviewers: +- fiunchinho +- tariq1890 +- mrueg diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/README.md b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/README.md new file mode 100644 index 000000000..7d5cee525 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/README.md @@ -0,0 +1,79 @@ +# kube-state-metrics Helm Chart + +* Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install stable/kube-state-metrics +``` + +## Configuration + +| Parameter | Description | Default | +|:---------------------------------------------|:--------------------------------------------------------------------------------------|:-------------------------------------------| +| `image.repository` | The image repository to pull from | `quay.io/coreos/kube-state-metrics` | +| `image.tag` | The image tag to pull from | `v1.9.7` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `imagePullSecrets` | List of container registry secrets | `[]` | +| `replicas` | Number of replicas | `1` | +| `autosharding.enabled` | Set to `true` to automatically shard data across `replicas` pods. EXPERIMENTAL | `false` | +| `service.port` | The port of the container | `8080` | +| `service.annotations` | Annotations to be added to the service | `{}` | +| `customLabels` | Custom labels to apply to service, deployment and pods | `{}` | +| `hostNetwork` | Whether or not to use the host network | `false` | +| `prometheusScrape` | Whether or not enable prom scrape | `true` | +| `rbac.create` | If true, create & use RBAC resources | `true` | +| `serviceAccount.create` | If true, create & use serviceAccount | `true` | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `serviceAccount.imagePullSecrets` | Specify image pull secrets field | `[]` | +| `podSecurityPolicy.enabled` | If true, create & use PodSecurityPolicy resources. Note that related RBACs are created only if `rbac.enabled` is `true`. 
| `false` | +| `podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +| `podSecurityPolicy.additionalVolumes` | Specify allowed volumes in the pod security policy (`secret` is always allowed) | `[]` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the filesystem | `65534` | +| `securityContext.runAsGroup` | Group ID for the container | `65534` | +| `securityContext.runAsUser` | User ID for the container | `65534` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `podAnnotations` | Annotations to be added to the pod | `{}` | +| `podDisruptionBudget` | Optional PodDisruptionBudget | `{}` | +| `resources` | kube-state-metrics resource requests and limits | `{}` | +| `collectors.certificatesigningrequests` | Enable the certificatesigningrequests collector. | `true` | +| `collectors.configmaps` | Enable the configmaps collector. | `true` | +| `collectors.cronjobs` | Enable the cronjobs collector. | `true` | +| `collectors.daemonsets` | Enable the daemonsets collector. | `true` | +| `collectors.deployments` | Enable the deployments collector. | `true` | +| `collectors.endpoints` | Enable the endpoints collector. | `true` | +| `collectors.horizontalpodautoscalers` | Enable the horizontalpodautoscalers collector. | `true` | +| `collectors.ingresses` | Enable the ingresses collector. | `true` | +| `collectors.jobs` | Enable the jobs collector. | `true` | +| `collectors.limitranges` | Enable the limitranges collector. | `true` | +| `collectors.mutatingwebhookconfigurations` | Enable the mutatingwebhookconfigurations collector. | `true` | +| `collectors.namespaces` | Enable the namespaces collector. | `true` | +| `collectors.networkpolicies` | Enable the networkpolicies collector. | `true` | +| `collectors.nodes` | Enable the nodes collector. | `true` | +| `collectors.persistentvolumeclaims` | Enable the persistentvolumeclaims collector. | `true` | +| `collectors.persistentvolumes` | Enable the persistentvolumes collector. | `true` | +| `collectors.poddisruptionbudgets` | Enable the poddisruptionbudgets collector. | `true` | +| `collectors.pods` | Enable the pods collector. | `true` | +| `collectors.replicasets` | Enable the replicasets collector. | `true` | +| `collectors.replicationcontrollers` | Enable the replicationcontrollers collector. | `true` | +| `collectors.resourcequotas` | Enable the resourcequotas collector. | `true` | +| `collectors.secrets` | Enable the secrets collector. | `true` | +| `collectors.services` | Enable the services collector. | `true` | +| `collectors.statefulsets` | Enable the statefulsets collector. | `true` | +| `collectors.storageclasses` | Enable the storageclasses collector. | `true` | +| `collectors.validatingwebhookconfigurations` | Enable the validatingwebhookconfigurations collector. | `true` | +| `collectors.verticalpodautoscalers` | Enable the verticalpodautoscalers collector. | `true` | +| `collectors.volumeattachments` | Enable the volumeattachments collector. 
| `true` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `prometheus.monitor.namespace` | Namespace where servicemonitor resource should be created | `the same namespace as kube-state-metrics` | +| `prometheus.monitor.honorLabels` | Honor metric labels | `false` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `kubeTargetVersionOverride` | Override the k8s version of the target cluster | `""` | diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/NOTES.txt b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/NOTES.txt new file mode 100644 index 000000000..5a646e0cc --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/NOTES.txt @@ -0,0 +1,10 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. +They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/_helpers.tpl b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/_helpers.tpl new file mode 100644 index 000000000..6ae0e647f --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrole.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrole.yaml new file mode 100644 index 000000000..319aec16c --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrole.yaml @@ -0,0 +1,180 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} +rules: +{{ if .Values.collectors.certificatesigningrequests }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.configmaps }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.cronjobs }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.daemonsets }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.deployments }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.endpoints }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.horizontalpodautoscalers }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.ingresses }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.jobs }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.limitranges }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.mutatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.namespaces }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.networkpolicies }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.nodes }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.persistentvolumeclaims }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.persistentvolumes }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.poddisruptionbudgets }} +- apiGroups: 
["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.pods }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.replicasets }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.replicationcontrollers }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.resourcequotas }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.secrets }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.services }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.statefulsets }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.storageclasses }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.validatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.volumeattachments }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.verticalpodautoscalers }} +- apiGroups: ["autoscaling.k8s.io"] + resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..4635985aa --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-state-metrics.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/deployment.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 000000000..99656046c --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,191 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + replicas: {{ .Values.replicas }} +{{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] +{{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} +{{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- end }} + args: +{{ if .Values.collectors.certificatesigningrequests }} + - --collectors=certificatesigningrequests +{{ end }} +{{ if .Values.collectors.configmaps }} + - --collectors=configmaps +{{ end }} +{{ if .Values.collectors.cronjobs }} + - --collectors=cronjobs +{{ end }} +{{ if .Values.collectors.daemonsets }} + - --collectors=daemonsets +{{ end }} +{{ if .Values.collectors.deployments }} + - --collectors=deployments +{{ end }} +{{ if .Values.collectors.endpoints }} + - --collectors=endpoints +{{ end }} +{{ if .Values.collectors.horizontalpodautoscalers }} + - --collectors=horizontalpodautoscalers +{{ end }} +{{ if .Values.collectors.ingresses }} + - --collectors=ingresses +{{ end }} +{{ if .Values.collectors.jobs }} + - --collectors=jobs +{{ end }} +{{ if .Values.collectors.limitranges }} + - --collectors=limitranges +{{ end }} +{{ if .Values.collectors.mutatingwebhookconfigurations }} + - --collectors=mutatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.namespaces }} + - --collectors=namespaces +{{ end }} +{{ if .Values.collectors.networkpolicies }} + - --collectors=networkpolicies +{{ end }} +{{ if .Values.collectors.nodes }} + - --collectors=nodes +{{ end }} +{{ if .Values.collectors.persistentvolumeclaims }} + - --collectors=persistentvolumeclaims +{{ end }} +{{ if .Values.collectors.persistentvolumes }} + - --collectors=persistentvolumes +{{ end }} +{{ if .Values.collectors.poddisruptionbudgets }} + - --collectors=poddisruptionbudgets +{{ end }} +{{ if .Values.collectors.pods }} + - --collectors=pods +{{ end }} +{{ if .Values.collectors.replicasets }} + - --collectors=replicasets +{{ end }} +{{ if .Values.collectors.replicationcontrollers }} + - --collectors=replicationcontrollers +{{ end }} +{{ if .Values.collectors.resourcequotas }} + - --collectors=resourcequotas +{{ end }} +{{ if .Values.collectors.secrets }} + - --collectors=secrets +{{ end }} +{{ if .Values.collectors.services }} + - 
--collectors=services +{{ end }} +{{ if .Values.collectors.statefulsets }} + - --collectors=statefulsets +{{ end }} +{{ if .Values.collectors.storageclasses }} + - --collectors=storageclasses +{{ end }} +{{ if .Values.collectors.validatingwebhookconfigurations }} + - --collectors=validatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.verticalpodautoscalers }} + - --collectors=verticalpodautoscalers +{{ end }} +{{ if .Values.collectors.volumeattachments }} + - --collectors=volumeattachments +{{ end }} +{{ if .Values.namespace }} + - --namespace={{ .Values.namespace }} +{{ end }} +{{ if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) +{{ end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: 8080 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/pdb.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/pdb.yaml new file mode 100644 index 000000000..6adb50d79 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} \ No newline at end of file diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..e822ba0e7 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100644 index 000000000..217abc950 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100644 index 000000000..feb97f228 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/service.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 000000000..5dacf5217 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: 8080 +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} + selector: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 000000000..32bb1640f --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 000000000..54cde362d --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,25 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 000000000..bf5396072 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - kube-state-metrics + resources: + - statefulsets + verbs: + - get +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 000000000..6a2e5bfe7 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/values.yaml b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/values.yaml new file mode 100644 index 000000000..11f48fb77 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/charts/kube-state-metrics/values.yaml @@ -0,0 +1,139 @@ +# Default values for kube-state-metrics. +prometheusScrape: true +image: + repository: quay.io/coreos/kube-state-metrics + tag: v1.9.7 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. 
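+# As an illustration only (not shipped in these defaults), an override values file that
+# turns on this experimental sharding across two pods might look like:
+#
+#   autosharding:
+#     enabled: true
+#   replicas: 2
+#
+# With autosharding enabled the chart renders a StatefulSet instead of a Deployment, and
+# each pod derives its shard from the POD_NAME and POD_NAMESPACE environment variables
+# passed to kube-state-metrics via the --pod and --pod-namespace flags (see deployment.yaml above).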
+autosharding: + enabled: false + +replicas: 1 + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + annotations: {} + +customLabels: {} + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + honorLabels: false + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +securityContext: + enabled: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Available collectors for kube-state-metrics. By default all available +# collectors are enabled. +collectors: + certificatesigningrequests: true + configmaps: true + cronjobs: true + daemonsets: true + deployments: true + endpoints: true + horizontalpodautoscalers: true + ingresses: true + jobs: true + limitranges: true + mutatingwebhookconfigurations: true + namespaces: true + networkpolicies: true + nodes: true + persistentvolumeclaims: true + persistentvolumes: true + poddisruptionbudgets: true + pods: true + replicasets: true + replicationcontrollers: true + resourcequotas: true + secrets: true + services: true + statefulsets: true + storageclasses: true + validatingwebhookconfigurations: true + verticalpodautoscalers: false + volumeattachments: true + +# Namespace to be enabled for collecting resources. By default all namespaces are collected. +# namespace: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. 
+## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" diff --git a/charts/datadog/datadog/2.4.200/ci/cluster-agent-and-worker-with-dedicated-rbac-values.yaml b/charts/datadog/datadog/2.4.200/ci/cluster-agent-and-worker-with-dedicated-rbac-values.yaml new file mode 100644 index 000000000..d290ace8a --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/cluster-agent-and-worker-with-dedicated-rbac-values.yaml @@ -0,0 +1,17 @@ +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + clusterChecks: + enabled: true + +clusterAgent: + enabled: true + +clusterChecksRunner: + enabled: true + replicas: 1 + rbac: + dedicated: true + serviceAccountAnnotations: + "eks.amazonaws.com/role-arn": "arn:aws:iam::123456789012:role/datadog-clusterchecker" diff --git a/charts/datadog/datadog/2.4.200/ci/cluster-agent-metrics-server-service-port-values.yaml b/charts/datadog/datadog/2.4.200/ci/cluster-agent-metrics-server-service-port-values.yaml new file mode 100644 index 000000000..2006b81a4 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/cluster-agent-metrics-server-service-port-values.yaml @@ -0,0 +1,13 @@ +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + +clusterAgent: + enabled: true + + metricsProvider: + enabled: true + + service: + port: 4443 diff --git a/charts/datadog/datadog/2.4.200/ci/cluster-agent-values.yaml b/charts/datadog/datadog/2.4.200/ci/cluster-agent-values.yaml new file mode 100644 index 000000000..7a5aeb66a --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/cluster-agent-values.yaml @@ -0,0 +1,25 @@ +datadog: + clusterName: kubernetes-cluster.example.com + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + clusterChecks: + enabled: true + +clusterAgent: + enabled: true + wpaController: true + +clusterChecksRunner: + enabled: true + replicas: 1 + + volumes: + - name: tmp + hostPath: + path: /tmp + + volumeMounts: + - name: tmp + mountPath: /etc/tmp + readOnly: true diff --git a/charts/datadog/datadog/2.4.200/ci/default-values.yaml b/charts/datadog/datadog/2.4.200/ci/default-values.yaml new file mode 100644 index 000000000..ff92bd38a --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/default-values.yaml @@ -0,0 +1,4 @@ +# Empty values file for testing default parameters. +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" diff --git a/charts/datadog/datadog/2.4.200/ci/dogstastd-socket-values.yaml b/charts/datadog/datadog/2.4.200/ci/dogstastd-socket-values.yaml new file mode 100644 index 000000000..a514bea92 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/dogstastd-socket-values.yaml @@ -0,0 +1,12 @@ +# Empty values file for testing default parameters. 
+datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + + dogstatsd: + useSocketVolume: true + + apm: + enabled: true + useSocketVolume: true diff --git a/charts/datadog/datadog/2.4.200/ci/kubeval.yaml b/charts/datadog/datadog/2.4.200/ci/kubeval.yaml new file mode 100644 index 000000000..c1723ded6 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/kubeval.yaml @@ -0,0 +1,46 @@ +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + env: + - name: "DD_KUBELET_TLS_VERIFY" + value: "false" + logs: + enabled: true + containerCollectAll: true + apm: + enabled: true + processAgent: + enabled: true + processCollection: true + systemProbe: + enabled: true + enableConntrack: true + enableTCPQueueLength: true + enableOOMKill: true + collectDNSStats: true + orchestratorExplorer: + enabled: true + clusterChecks: + enabled: true + kubeStateMetricsEnabled: true +clusterAgent: + enabled: true + createPodDisruptionBudget: true + nodeSelector: + kubernetes.io/os: linux + metricsProvider: + enabled: false + admissionController: + enabled: true + mutateUnlabelled: true +clusterChecksRunner: + enabled: true + createPodDisruptionBudget: true + nodeSelector: + kubernetes.io/os: linux +agents: + nodeSelector: + kubernetes.io/os: linux + podSecurity: + podSecurityPolicy: + create: true diff --git a/charts/datadog/datadog/2.4.200/ci/no_hardened_seccomp-values.yaml b/charts/datadog/datadog/2.4.200/ci/no_hardened_seccomp-values.yaml new file mode 100644 index 000000000..bafa362ef --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/no_hardened_seccomp-values.yaml @@ -0,0 +1,7 @@ +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + systemProbe: + enabled: true + seccomp: runtime/default diff --git a/charts/datadog/datadog/2.4.200/ci/psp-test-values.yaml b/charts/datadog/datadog/2.4.200/ci/psp-test-values.yaml new file mode 100644 index 000000000..f0bb23a43 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/psp-test-values.yaml @@ -0,0 +1,9 @@ +# Empty values file for testing default parameters. +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false +agents: + podSecurity: + podSecurityPolicy: + create: true diff --git a/charts/datadog/datadog/2.4.200/ci/securitycontext-nil-values.yaml b/charts/datadog/datadog/2.4.200/ci/securitycontext-nil-values.yaml new file mode 100644 index 000000000..155e92a35 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/ci/securitycontext-nil-values.yaml @@ -0,0 +1,7 @@ +# Test the support of `securitContext` set to `nil` +datadog: + apiKey: "00000000000000000000000000000000" + appKey: "0000000000000000000000000000000000000000" + kubeStateMetricsEnabled: false + + securityContext: diff --git a/charts/datadog/datadog/2.4.200/docs/Migration_1.x_to_2.x.md b/charts/datadog/datadog/2.4.200/docs/Migration_1.x_to_2.x.md new file mode 100644 index 000000000..062bcc5c5 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/docs/Migration_1.x_to_2.x.md @@ -0,0 +1,72 @@ +# Chart 1.x to 2.x migration guide + +The `datadog` chart has been refactored to regroup the `values.yaml` parameters in a more logical way. +Migrating from chart v1 to chart v2 hence requires that you restructure the `values.yaml` file. 
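+
+As a rough, illustrative sketch of that restructuring (the values shown are examples, not required settings), a chart v1 `values.yaml` such as:
+
+```yaml
+image:
+  repository: datadog/agent
+  tag: 7.21.1
+datadog:
+  apmEnabled: true
+```
+
+would be rewritten for chart v2 as:
+
+```yaml
+agents:
+  image:
+    repository: datadog/agent
+    tag: 7.21.1
+datadog:
+  apm:
+    enabled: true
+```
+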
+For all the parameters in the existing `values.yaml` file that applied to chart v1, you’ll +find the parameters to which they correspond in v2 in the following table. +Parameters that are not listed in the table below haven’t been touched and are at the same +location in v1 and v2. + +| Old parameter | New location | Comment | +| ------------- | ------------ | ------- | +| `image.repository` | `agents.image.repository` and `clusterChecksRunner.image.repository` | | +| `image.tag` | `agents.image.tag` and `clusterChecksRunner.image.tag` | | +| `image.pullPolicy` | `agents.image.pullPolicy` and `clusterChecksRunner.image.pullPolicy` | | +| `image.pullSecrets` | `agents.image.pullSecrets` and `clusterChecksRunner.image.pullSecrets` | | +| `datadog.name` | ∅ | The name of the container inside the Agent and Cluster Agent pod isn’t configurable anymore | +| `datadog.useCriSocketVolume` | ∅ | If `datadog.criSocketPath` is defined, the socket will be mounted inside the container without needing to set `datadog.useCriSocketVolume` in addition. | +| `datadog.containerLogsPath` | ∅ | Not needed anymore because the chart automatically detects if the CRI is `docker` based on `criSocketPath` and mounts the path accordingly | +| `datadog.apmEnabled` | `datadog.apm.enabled` | | +| `datadog.processAgentEnabled` | `datadog.processAgent.enabled` and `datadog.processAgent.processCollection:true` | | +| `datadog.volumes` | `agents.volumes` | | +| `datadog.volumeMounts` | `agents.volumeMounts` | | +| `datadog.livenessProbe` | `agents.containers.agent.livenessProbe` | | +| `datadog.resources` | `agents.containers.agent.resources` | | +| `datadog.dogstatsdOriginDetection` | `datadog.dogstatsd.originDetection` | | +| `datadog.useDogStatsDSocketVolume` | `datadog.dogstatsd.useSocketVolume` | | +| `systemProbe.enabled` | `datadog.systemProbe.enabled` | | +| `systemProbe.debugPort` | `datadog.systemProbe.debugPort` | | +| `systemProbe.enableConntrack` | `datadog.systemProbe.enableConntrack` | | +| `systemProbe.seccomp` | `datadog.systemProbe.seccomp` | | +| `systemProbe.seccompRoot` | `datadog.systemProbe.seccompRoot` | | +| `systemProbe.bpfDebug` | `datadog.systemProbe.bpfDebug` | | +| `systemProbe.apparmor` | `datadog.systemProbe.apparmor` | | +| `clusterAgent.containerName` | ∅ | The name of the container inside the Cluster Agent pod isn’t configurable anymore | +| `clusterAgent.clusterChecks.enabled` | `datadog.clusterChecks.enabled` | | +| `rbac.create` | `agents.rbac.create` and `clusterAgent.rbac.create` | | +| `rbac.serviceAccountName` | `agents.rbac.serviceAccountName` and `clusterAgent.rbac.serviceAccountName` | | +| `toleration` | `agents.tolerations` | | +| `kubeStateMetrics.enabled` | `datadog.kubeStateMetricsEnabled` | | +| `daemonset.enabled` | `agents.enabled` | | +| `daemonset.containers.agent.*` | `agents.containers.agent.*` | | +| `daemonset.containers.processAgent.*` | `agents.containers.processAgent.*` | | +| `daemonset.containers.traceAgent.*` | `agents.containers.traceAgent.*` | | +| `daemonset.containers.systemProbe.*` | `agents.containers.systemProbe.*` | | +| `daemonset.useHostNetwork` | `agents.useHostNetwork` | | +| `daemonset.dogstatsdPort` | `datadog.dogstatsd.port` | | +| `daemonset.useHostPort` | `datadog.dogstatsd.useHostPort` | | +| `daemonset.useHostPID` | `datadog.dogstatsd.useHostPID` | | +| `daemonset.nonLocalTraffic` | `datadog.dogstatsd.nonLocalTraffic` | | +| `daemonset.podAnnotations` | `agents.podAnnotations` | | +| `daemonset.tolerations` | `agents.tolerations` | | +| 
`daemonset.nodeSelector` | `agents.nodeSelector` | | +| `daemonset.affinity` | `agents.affinity` | | +| `daemonset.updateStrategy` | `agents.updateStrategy` | | +| `daemonset.priorityClassName` | `agents.priorityClassName` | | +| `daemonset.podLabels` | `agents.podLabels` | | +| `daemonset.useConfigMap` | `agents.useConfigMap` | | +| `daemonset.customAgentConfig.*` | `agents.customAgentConfig.*` | | +| `daemonset.useDedicatedContainers` | ∅ | | +| `deployment.*` | ∅ | | +| `clusterchecksDeployment.enabled` | `clusterChecksRunner.enabled` | | +| `clusterchecksDeployment.rbac.*` | `clusterChecksRunner.rbac.*` | | +| `clusterchecksDeployment.replicas` | `clusterChecksRunner.replicas` | | +| `clusterchecksDeployment.resources.*` | `clusterChecksRunner.resources.*` | | +| `clusterchecksDeployment.affinity` | `clusterChecksRunner.affinity` | | +| `clusterchecksDeployment.strategy` | `clusterChecksRunner.strategy` | | +| `clusterchecksDeployment.nodeSelector` | `clusterChecksRunner.nodeSelector` | | +| `clusterchecksDeployment.tolerations` | `clusterChecksRunner.tolerations` | | +| `clusterchecksDeployment.livenessProbe` | `clusterChecksRunner.livenessProbe` | | +| `clusterchecksDeployment.env` | `clusterChecksRunner.env` | | +| `logsEnabled` | `datadog.logs.enabled` | | +| `logsConfigContainerCollectAll` | `datadog.logs.containerCollectAll` | | diff --git a/charts/datadog/datadog/2.4.200/questions.yml b/charts/datadog/datadog/2.4.200/questions.yml new file mode 100644 index 000000000..4305c70e0 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/questions.yml @@ -0,0 +1,221 @@ +questions: +#image configurations +- variable: defaultImage + default: true + description: "Use default Datadog image or specify a custom one" + label: Use Default Datadog Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: agents.image.repository + default: "datadog/agent" + description: "Datadog image name" + type: string + label: Datadog Image Name + - variable: agents.image.tag + default: "7.21.1" + description: "Datadog Image Tag" + type: string + label: Datadog Image Tag + - variable: clusterAgent.image.repository + default: "datadog/cluster-agent" + description: "Datadog clusterAgent image name" + type: string + label: Datadog ClusterAgent Image Name + - variable: clusterAgent.image.tag + default: "1.7.0" + description: "Datadog ClusterAgent Image Tag" + type: string + label: Datadog ClusterAgent Image Tag + - variable: clusterChecksRunner.image.repository + default: "datadog/agent" + description: "Datadog clusterChecksRunner image name" + type: string + label: Datadog Cluster Checks Runner Image Name + show_if: "clusterChecksRunner.enabled=true&&defaultImage=false" + - variable: clusterChecksRunner.image.tag + default: "7.21.1" + description: "Datadog Cluster Checks Runner Image Tag" + type: string + label: Datadog Cluster Checks Runner Image Tag + show_if: "clusterChecksRunner.enabled=true&&defaultImage=false" + - variable: kube-state-metrics.image.repository + default: "quay.io/coreos/kube-state-metrics" + description: "KubeState image name" + type: string + label: KubeState Image Name + show_if: "kubeStateMetrics.enabled=true&&defaultImage=false" + - variable: kube-state-metrics.image.tag + default: "v1.9.7" + description: "KubeState Image Tag" + type: string + label: KubeState Image Tag + show_if: "kubeStateMetrics.enabled=true&&defaultImage=false" +#cluster agent configurations +- variable: clusterAgent.enabled + default: false + description: "Use the 
cluster-agent for cluster metrics (Kubernetes 1.10+ only), https://docs.datadoghq.com/agent/kubernetes/cluster/" + type: boolean + label: Enable Cluster Agent Metrics(Kubernetes 1.10+ only) + group: "Cluster Agent" +- variable: clusterAgent.metricsProvider.enabled + default: true + description: "Enable the metricsProvider to be able to scale based on metrics in Datadog" + type: boolean + label: Enable the metricsProvider + show_if: "clusterAgent.enabled=true" + group: "Cluster Agent" +- variable: datadog.appKey + default: "" + description: "Datadog App key required to use metricsProvider" + type: string + required: true + label: Datadog Metrics App Key + group: "Cluster Agent" + show_if: "clusterAgent.enabled=true&&clusterAgent.metricsProvider.enabled=true&&datadog.appKeyExistingSecret=" +- variable: datadog.appKeyExistingSecret + default: "" + description: "If set, use the secret with a provided name instead of creating a new appKey secret." + type: secret + label: Select Existing Datadog App Key(Secret) + group: "Cluster Agent" + show_if: "clusterAgent.enabled=true&&clusterAgent.metricsProvider.enabled=true&&datadog.appKey=" +#datadog agent configurations +- variable: datadog.apiKey + default: "" + description: "Enter your Datadog API Key." + type: string + label: Datadog API Key + group: "Agent Configuration" + required: true + show_if: "datadog.apiKeyExistingSecret=" +- variable: datadog.apiKeyExistingSecret + default: "" + description: "If set, use the secret with a provided name instead of creating a new apiKey secret." + type: secret + label: Select Existing Datadog API Key(Secret) + group: "Agent Configuration" + show_if: "datadog.apiKey=" +- variable: datadog.site + default: "datadoghq.com" + description: "The site of the Datadog intake to send Agent data to" + type: enum + label: Datadog Site URL + group: "Agent Configuration" + required: true + options: + - "datadoghq.com" + - "datadoghq.eu" + - "custom" +- variable: datadog.dd_url + required: true + default: "" + description: "The host of the Datadog intake server to send Agent data to, only set this option if you need the Agent to send data to a custom URL" + type: string + label: Datadog Custom Site URL + group: "Agent Configuration" + show_if: "datadog.site=custom" +- variable: datadog.logLevel + default: "warn" + description: "Set Agent logging verbosity" + type: enum + options: + - "trace" + - "debug" + - "info" + - "warn" + - "error" + - "critical" + - "off" + label: Log Level + group: "Agent Configuration" +- variable: datadog.tags + default: "" + description: "Host tags, separated by spaces. 
For example: 'simple-tag-0 tag-key-1:tag-value-1'" + type: string + label: Host Tags + group: "Agent Configuration" +- variable: datadog.criSocketPath + default: "" + description: "Path to the container runtime socket (if different from Docker), default to `/var/run/docker.sock`" + type: string + label: Path To The Container Runtime Socket(Optional) + group: "Agent Configuration" +- variable: datadog.dogstatsd.nonLocalTraffic + default: false + description: "Whether DogStatsD should listen to non local UDP traffic, required to send custom metrics" + type: boolean + label: DogStatsD Non-Local Traffic + group: "Agent Configuration" +- variable: datadog.collectEvents + default: false + description: "Enable event collection from the kubernetes API" + type: boolean + label: Collect Events + group: "Agent Configuration" +# Datadog Tagging +- variable: datadog.podLabelsAsTags + default: "" + description: "Specify a JSON map, where the map key is the source label name and the map value the datadog tag name. E.g: '{\"app\":\"kube_app\",\"release\":\"helm_release\"}'" + type: string + label: Extract Pod Labels as Tags + group: "Datadog Tagging" +- variable: datadog.podAnnotationsAsTags + default: "" + description: "Specify a JSON map, where the map key is the source label name and the map value the datadog tag name. E.g: '{\"app\":\"kube_app\",\"release\":\"helm_release\"}'" + type: string + label: Extract Pod Annotations as Tags + group: "Datadog Tagging" +- variable: datadog.nodeLabelsAsTags + default: "" + description: "Specify a JSON map, where the map key is the source label name and the map value the datadog tag name. E.g: '{\"app\":\"kube_app\",\"release\":\"helm_release\"}'" + type: string + label: Extract Node Labels As Tags + group: "Datadog Tagging" +# DaemonSet configurations +- variable: agents.useHostNetwork + default: false + description: "Bind ports on the hostNetwork. Useful for CNI networking where hostPort might not be supported. 
The ports will need to be available on all hosts" + type: boolean + label: Use HostNetwork + group: "Daemonset Configuration" +- variable: agents.dogstatsd.useHostPort + default: false + description: "Sets the hostPort to the same value of the container port" + type: boolean + label: Use HostPort + group: "Daemonset Configuration" +- variable: agents.useHostPID + default: false + description: "Run the agent in the host's PID namespace" + type: boolean + label: Use HostPID + group: "Daemonset Configuration" +# Optional Collection agents +- variable: datadog.apm.enabled + default: false + description: "Run the trace-agent along with the infrastructure agent, allowing the container to accept traces on 8126/tcp" + type: boolean + label: Enable APM + group: "Optional Collection Agents" +- variable: datadog.processAgent.enabled + default: false + description: "Enable live process collection in the process-agent" + type: boolean + label: Enable Live Process Agent + group: "Optional Collection Agents" +- variable: datadog.logs.enabled + default: false + description: "Run the log-agent along with the infrastructure agent" + type: boolean + label: Collect Logs + group: "Optional Collection Agents" + +#Kube State Metrics +- variable: kubeStateMetrics.enabled + default: true + description: "Create a kube-state-metrics deployment" + type: boolean + label: Deployment KubeState Metrics Deployment + group: "Kube-State-Metrics" diff --git a/charts/datadog/datadog/2.4.200/requirements.lock b/charts/datadog/datadog/2.4.200/requirements.lock new file mode 100644 index 000000000..2ffda047e --- /dev/null +++ b/charts/datadog/datadog/2.4.200/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-state-metrics + repository: https://charts.helm.sh/stable + version: 2.8.11 +digest: sha256:2ba76abd3b45ad9c43b7bef2126e96240281172e58135223393b749bf235e6b6 +generated: "2020-12-21T19:54:44.539642454Z" diff --git a/charts/datadog/datadog/2.4.200/requirements.yaml b/charts/datadog/datadog/2.4.200/requirements.yaml new file mode 100644 index 000000000..30a43801a --- /dev/null +++ b/charts/datadog/datadog/2.4.200/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: kube-state-metrics + version: "=2.8.11" + repository: https://charts.helm.sh/stable + condition: datadog.kubeStateMetricsEnabled diff --git a/charts/datadog/datadog/2.4.200/templates/NOTES.txt b/charts/datadog/datadog/2.4.200/templates/NOTES.txt new file mode 100644 index 000000000..ba3dac236 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/NOTES.txt @@ -0,0 +1,76 @@ +{{- if (or (.Values.datadog.apiKeyExistingSecret) (not (eq .Values.datadog.apiKey ""))) }} +Datadog agents are spinning up on each node in your cluster. After a few +minutes, you should see your agents starting in your event stream: + https://app.datadoghq.com/event/stream + + {{- if .Values.datadog.apiKeyExistingSecret }} +You disabled creation of Secret containing API key, therefore it is expected +that you create Secret named '{{ .Values.datadog.apiKeyExistingSecret }}' which includes a key called 'api-key' containing the API key. + {{- else if not (eq .Values.datadog.apiKey "") }} + {{- end }} +{{- else }} +############################################################################## +#### ERROR: You did not set a datadog.apiKey. #### +############################################################################## + +This deployment will be incomplete until you get your API key from Datadog. 
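As an illustrative sketch (the Secret name "datadog-keys" below is a placeholder, not something this chart creates), the key can instead come from a pre-created Secret referenced through datadog.apiKeyExistingSecret; the data key must be named 'api-key':

    kubectl create secret generic datadog-keys \
      --from-literal=api-key=<YOUR_DATADOG_API_KEY>

    helm install <release-name> <chart-ref> \
      --set datadog.apiKeyExistingSecret=datadog-keys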
+One can sign up for a free Datadog trial at https://app.datadoghq.com/signup + +Once registered you can request an API key at: + + https://app.datadoghq.com/account/settings#agent/kubernetes + +Then run: + + helm upgrade {{ .Release.Name }} \ + --set datadog.apiKey=YOUR-KEY-HERE stable/datadog +{{- end }} + +{{- if .Values.clusterAgent.enabled }} + + {{- if .Values.clusterAgent.metricsProvider.enabled }} + {{- if .Values.datadog.appKeyExistingSecret }} +You disabled creation of Secret containing APP key, therefore it is expected +that you create a Secret named '{{ .Values.datadog.appKeyExistingSecret }}' which includes a key called 'app-key' containing the APP key. + {{- else if (.Values.datadog.appKey) }} + {{- else }} + +############################################################################## +#### ERROR: You did not set a datadog.appKey. #### +############################################################################## + +This deployment will be incomplete until you get your APP key from Datadog. +Create an application key at https://app.datadoghq.com/account/settings#api + {{- end }} + {{- end }} + +{{- end }} + +{{- if .Values.datadog.apm.enabled }} +The Datadog Agent is listening on port {{ .Values.datadog.apm.port }} for APM service. +{{- end }} + +{{- if .Values.datadog.autoconf }} + +################################################################# +#### WARNING: Deprecation notice #### +################################################################# + +The autoconf value is deprecated, Autodiscovery templates can now +be safely moved to the confd value. As a temporary measure, both +values were merged into the {{ template "datadog.fullname" . }}-confd configmap, +but this will be removed in a future chart release. +Please note that duplicate file names may have conflicted during +the merge. In that case, the confd entry will take precedence. +{{- end }} + +{{- if eq .Values.agents.image.repository "datadog/docker-dd-agent" }} + +###################################################################### +#### ERROR: Unsupported agent version #### +###################################################################### + +This version of the chart does not support deploying Agent 5.x. +If you cannot upgrade to Agent 6.x or later, you can use a previous version +of the chart by calling helm install with `--version 0.18.0`. +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/_helpers.tpl b/charts/datadog/datadog/2.4.200/templates/_helpers.tpl new file mode 100644 index 000000000..07035db99 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/_helpers.tpl @@ -0,0 +1,137 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "check-version" -}} +{{- if not .Values.agents.image.doNotCheckTag -}} +{{- $version := .Values.agents.image.tag | toString | trimSuffix "-jmx" -}} +{{- $length := len (split "." $version) -}} +{{- if and (eq $length 1) (eq $version "6") -}} +{{- $version = "6.19.0" -}} +{{- end -}} +{{- if and (eq $length 1) (eq $version "7") -}} +{{- $version = "7.19.0" -}} +{{- end -}} +{{- if and (eq $length 1) (eq $version "latest") -}} +{{- $version = "7.19.0" -}} +{{- end -}} +{{- if not (semverCompare "^6.19.0-0 || ^7.19.0-0" $version) -}} +{{- fail "This version of the chart requires an agent image 7.19.0 or greater. If you want to force and skip this check, use `--set agents.image.doNotCheckTag=true`" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. 
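A minimal values sketch that satisfies the check-version guard defined above; the repository value is an assumption (the guard only inspects the tag):

    agents:
      image:
        repository: datadog/agent  # assumed default, not enforced by the guard
        tag: "7.19.0"              # any 6.19.0+ / 7.19.0+ tag passes; a "-jmx" suffix is trimmed first
        doNotCheckTag: false       # set to true to skip the check, as the fail message suggests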
+*/}} +{{- define "datadog.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +And depending on the resources the name is completed with an extension. +If release name contains chart name it will be used as a full name. +*/}} +{{- define "datadog.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "datadog.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return secret name to be used based on provided values. +*/}} +{{- define "datadog.apiSecretName" -}} +{{- $fullName := include "datadog.fullname" . -}} +{{- default $fullName .Values.datadog.apiKeyExistingSecret | quote -}} +{{- end -}} + +{{/* +Return secret name to be used based on provided values. +*/}} +{{- define "datadog.appKeySecretName" -}} +{{- $fullName := printf "%s-appkey" (include "datadog.fullname" .) -}} +{{- default $fullName .Values.datadog.appKeyExistingSecret | quote -}} +{{- end -}} + +{{/* +Return secret name to be used based on provided values. +*/}} +{{- define "clusterAgent.tokenSecretName" -}} +{{- if not .Values.clusterAgent.tokenExistingSecret -}} +{{- include "datadog.fullname" . -}}-cluster-agent +{{- else -}} +{{- .Values.clusterAgent.tokenExistingSecret -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC APIs. 
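A sketch of how the secret-name helpers above resolve (Secret names here are placeholders): with datadog.appKeyExistingSecret and clusterAgent.tokenExistingSecret left empty, the chart falls back to "<fullname>-appkey" and "<fullname>-cluster-agent"; when they are set, the referenced Secrets must already exist with data keys 'app-key' and 'token':

    kubectl create secret generic datadog-appkey \
      --from-literal=app-key=<YOUR_DATADOG_APP_KEY>
    kubectl create secret generic datadog-cluster-agent-token \
      --from-literal=token=<32-character-token>   # matching the 32-character token the chart would otherwise generate

    # values excerpt (illustrative)
    datadog:
      appKeyExistingSecret: datadog-appkey
    clusterAgent:
      tokenExistingSecret: datadog-cluster-agent-token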
+*/}} +{{- define "rbac.apiVersion" -}} +{{- if semverCompare "^1.8-0" .Capabilities.KubeVersion.GitVersion -}} +"rbac.authorization.k8s.io/v1" +{{- else -}} +"rbac.authorization.k8s.io/v1beta1" +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate os label +*/}} +{{- define "label.os" -}} +{{- if semverCompare "^1.14-0" .Capabilities.KubeVersion.GitVersion -}} +kubernetes.io/os +{{- else -}} +beta.kubernetes.io/os +{{- end -}} +{{- end -}} + +{{/* +Correct `clusterAgent.metricsProvider.service.port` if Kubernetes <= 1.15 +*/}} +{{- define "clusterAgent.metricsProvider.port" -}} +{{- if semverCompare "^1.15-0" .Capabilities.KubeVersion.GitVersion -}} +{{- .Values.clusterAgent.metricsProvider.service.port -}} +{{- else -}} +443 +{{- end -}} +{{- end -}} + +{{/* +Return the container runtime socket +*/}} +{{- define "datadog.dockerOrCriSocketPath" -}} +{{- if eq .Values.targetSystem "linux" -}} +{{- .Values.datadog.dockerSocketPath | default .Values.datadog.criSocketPath | default "/var/run/docker.sock" -}} +{{- end -}} +{{- if eq .Values.targetSystem "windows" -}} +\\.\pipe\docker_engine +{{- end -}} +{{- end -}} + +{{/* +Return agent config path +*/}} +{{- define "datadog.confPath" -}} +{{- if eq .Values.targetSystem "linux" -}} +/etc/datadog-agent +{{- end -}} +{{- if eq .Values.targetSystem "windows" -}} +C:/ProgramData/Datadog +{{- end -}} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-apiservice.yaml b/charts/datadog/datadog/2.4.200/templates/agent-apiservice.yaml new file mode 100644 index 000000000..2a7ab8331 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-apiservice.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.clusterAgent.rbac.create .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled -}} +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.external.metrics.k8s.io + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + service: + name: {{ template "datadog.fullname" . }}-cluster-agent-metrics-api + namespace: {{ .Release.Namespace }} +{{- if semverCompare "^1.15-0" .Capabilities.KubeVersion.GitVersion }} + port: {{ template "clusterAgent.metricsProvider.port" . }} +{{- end }} + version: v1beta1 + insecureSkipTLSVerify: true + group: external.metrics.k8s.io + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-deployment.yaml b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-deployment.yaml new file mode 100644 index 000000000..55020526a --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-deployment.yaml @@ -0,0 +1,191 @@ +{{- if and .Values.clusterAgent.enabled .Values.datadog.clusterChecks.enabled .Values.clusterChecksRunner.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "datadog.fullname" . }}-clusterchecks + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . 
}}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + replicas: {{ .Values.clusterChecksRunner.replicas }} + strategy: +{{ toYaml .Values.clusterChecksRunner.strategy | indent 4 }} + selector: + matchLabels: + app: {{ template "datadog.fullname" . }}-clusterchecks + template: + metadata: + labels: + app: {{ template "datadog.fullname" . }}-clusterchecks + name: {{ template "datadog.fullname" . }}-clusterchecks + annotations: + checksum/install_info: {{ printf "%s-%s" .Chart.Name .Chart.Version | sha256sum }} + {{- if .Values.datadog.checksd }} + checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }} + {{- end }} + spec: + {{- if .Values.clusterChecksRunner.rbac.dedicated }} + serviceAccountName: {{ if .Values.clusterChecksRunner.rbac.create }}{{ template "datadog.fullname" . }}-cluster-checks{{ else }}"{{ .Values.clusterChecksRunner.rbac.serviceAccountName }}"{{ end }} + {{- else }} + serviceAccountName: {{ if .Values.clusterChecksRunner.rbac.create }}{{ template "datadog.fullname" . }}{{ else }}"{{ .Values.clusterChecksRunner.rbac.serviceAccountName }}"{{ end }} + {{- end }} + imagePullSecrets: +{{ toYaml .Values.clusterChecksRunner.image.pullSecrets | indent 8 }} + {{- if .Values.clusterChecksRunner.dnsConfig }} + dnsConfig: +{{ toYaml .Values.clusterChecksRunner.dnsConfig | indent 8 }} + {{- end }} + initContainers: + - name: init-volume + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 10 }} + - name: init-config + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["bash", "-c"] + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + {{- if .Values.datadog.checksd }} + - name: checksd + mountPath: /checks.d + readOnly: true + {{- end }} + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 10 }} + containers: + - name: agent + image: "{{ .Values.clusterChecksRunner.image.repository }}:{{ .Values.clusterChecksRunner.image.tag }}" + command: ["bash", "-c"] + args: + - rm -rf /etc/datadog-agent/conf.d && touch /etc/datadog-agent/datadog.yaml && exec agent run + imagePullPolicy: {{ .Values.clusterChecksRunner.image.pullPolicy }} + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "datadog.apiSecretName" . }} + key: api-key + - name: KUBERNETES + value: "yes" + {{- if .Values.datadog.site }} + - name: DD_SITE + value: {{ .Values.datadog.site | quote }} + {{- end }} + {{- if .Values.datadog.dd_url }} + - name: DD_DD_URL + value: {{ .Values.datadog.dd_url | quote }} + {{- end }} + {{- if .Values.datadog.logLevel }} + - name: DD_LOG_LEVEL + value: {{ .Values.datadog.logLevel | quote }} + {{- end }} + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks" + - name: DD_HEALTH_PORT + value: "5555" + # Cluster checks + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ template "datadog.fullname" . 
}}-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ template "clusterAgent.tokenSecretName" . }} + key: token + - name: DD_CLUSTER_AGENT_ENABLED + value: {{ .Values.clusterAgent.enabled | quote }} + # Safely run alongside the daemonset + - name: DD_ENABLE_METADATA_COLLECTION + value: "false" + # Expose CLC stats + - name: DD_CLC_RUNNER_ENABLED + value: "true" + - name: DD_CLC_RUNNER_HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + # Remove unused features + - name: DD_USE_DOGSTATSD + value: "false" + - name: DD_PROCESS_AGENT_ENABLED + value: "false" + - name: DD_LOGS_ENABLED + value: "false" + - name: DD_APM_ENABLED + value: "false" + - name: DD_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{{- if .Values.clusterChecksRunner.env }} +{{ toYaml .Values.clusterChecksRunner.env | indent 10 }} +{{- end }} + resources: +{{ toYaml .Values.clusterChecksRunner.resources | indent 10 }} + volumeMounts: + - name: installinfo + subPath: install_info + {{- if eq .Values.targetSystem "windows" }} + mountPath: C:/ProgramData/Datadog/install_info + {{- else }} + mountPath: /etc/datadog-agent/install_info + {{- end }} + readOnly: true +{{- if .Values.clusterChecksRunner.volumeMounts }} + - name: config + mountPath: {{ template "datadog.confPath" . }} +{{ toYaml .Values.clusterChecksRunner.volumeMounts | indent 10 }} +{{- end }} + livenessProbe: +{{ toYaml .Values.clusterChecksRunner.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.clusterChecksRunner.readinessProbe | indent 10 }} + volumes: + - name: installinfo + configMap: + name: {{ template "datadog.fullname" . }}-installinfo +{{- if .Values.clusterChecksRunner.volumes }} +{{ toYaml .Values.clusterChecksRunner.volumes | indent 8 }} +{{- end }} + - name: config + emptyDir: {} +{{- if .Values.datadog.checksd }} + - name: checksd + configMap: + name: {{ template "datadog.fullname" . }}-checksd +{{- end }} + affinity: +{{- if .Values.clusterChecksRunner.affinity }} +{{ toYaml .Values.clusterChecksRunner.affinity | indent 8 }} +{{- else }} + # Ensure we only run one worker per node, to avoid name collisions + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "datadog.fullname" . }}-clusterchecks + topologyKey: kubernetes.io/hostname +{{- end }} + nodeSelector: + {{ template "label.os" . }}: {{ .Values.targetSystem }} + {{- if .Values.clusterChecksRunner.nodeSelector }} +{{ toYaml .Values.clusterChecksRunner.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.clusterChecksRunner.tolerations }} + tolerations: +{{ toYaml .Values.clusterChecksRunner.tolerations | indent 8 }} + {{- end }} +{{ end }} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-pdb.yaml b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-pdb.yaml new file mode 100644 index 000000000..3acb7a752 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.clusterChecksRunner.createPodDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "datadog.fullname" . }}-clusterchecks + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . 
}}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: {{ template "datadog.fullname" . }}-clusterchecks +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-rbac.yaml b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-rbac.yaml new file mode 100644 index 000000000..46ff0f9cf --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-clusterchecks-rbac.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.clusterChecksRunner.rbac.create .Values.clusterAgent.enabled .Values.datadog.clusterChecks.enabled .Values.clusterChecksRunner.enabled .Values.clusterChecksRunner.rbac.dedicated -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }}-cluster-checks +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "datadog.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "datadog.fullname" . }}-cluster-checks + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: "{{ template "datadog.fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }}-cluster-checks + {{- if .Values.clusterChecksRunner.rbac.serviceAccountAnnotations }} + annotations: {{ toYaml .Values.clusterChecksRunner.rbac.serviceAccountAnnotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-psp.yaml b/charts/datadog/datadog/2.4.200/templates/agent-psp.yaml new file mode 100644 index 000000000..719c5a1db --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-psp.yaml @@ -0,0 +1,33 @@ +{{- if .Values.agents.podSecurity.podSecurityPolicy.create}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "datadog.fullname" . }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . 
}}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: {{ join "," .Values.agents.podSecurity.seccompProfiles | quote }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: {{ join "," .Values.agents.podSecurity.apparmorProfiles | quote }} + seccomp.security.alpha.kubernetes.io/defaultProfileName: "runtime/default" + apparmor.security.beta.kubernetes.io/defaultProfileName: "runtime/default" +spec: + privileged: {{ .Values.agents.podSecurity.privileged }} + hostNetwork: {{ .Values.agents.useHostNetwork }} + hostPID: {{ .Values.datadog.dogstatsd.useHostPID }} + allowedCapabilities: +{{ toYaml .Values.agents.podSecurity.capabilites | indent 4 }} + volumes: +{{ toYaml .Values.agents.podSecurity.volumes | indent 4 }} + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: +{{ toYaml .Values.agents.podSecurity.securityContext | indent 4 }} + supplementalGroups: + rule: RunAsAny +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-scc.yaml b/charts/datadog/datadog/2.4.200/templates/agent-scc.yaml new file mode 100644 index 000000000..10edb45e1 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-scc.yaml @@ -0,0 +1,56 @@ +{{- if .Values.agents.podSecurity.securityContextConstraints.create }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ template "datadog.fullname" . }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +users: +- system:serviceaccount:{{ .Release.Namespace }}:{{ template "datadog.fullname" . 
}} +priority: 10 +# Allow host ports for dsd / trace intake ++allowHostPorts: {{ or .Values.datadog.dogstatsd.useHostPort .Values.datadog.apm.enabled }} +# Allow host PID for dogstatsd origin detection +allowHostPID: {{ .Values.datadog.dogstatsd.useHostPID }} +# Allow host network for the CRIO check to reach Prometheus through localhost +allowHostNetwork: {{ .Values.agents.useHostNetwork }} +# Allow hostPath for docker / process metrics +volumes: +{{ toYaml .Values.agents.podSecurity.volumes | indent 2 }} +# Use the `spc_t` selinux type to access the +# docker/cri socket + proc and cgroup stats +seLinuxContext: +{{ toYaml .Values.agents.podSecurity.securityContext | indent 2 }} +# system-probe requires some specific seccomp and capabilities +seccompProfiles: +{{ toYaml .Values.agents.podSecurity.seccompProfiles | indent 2 }} +allowedCapabilities: +{{ toYaml .Values.agents.podSecurity.capabilites | indent 2 }} +# +# The rest is copied from restricted SCC +# +allowHostDirVolumePlugin: true +allowHostIPC: false +allowPrivilegedContainer: {{ .Values.agents.podSecurity.privileged }} +allowedFlexVolumes: [] +defaultAddCapabilities: [] +fsGroup: + type: MustRunAs +readOnlyRootFilesystem: false +runAsUser: + type: RunAsAny +supplementalGroups: + type: RunAsAny +# If your environment restricts user access to the Docker socket or journald (for logging) +# create or use an existing group that has access and add the GID to +# the lines below (also remove the previous line, `type: RunAsAny`) +# type: MustRunAs +# ranges: +# - min: +# - max: +requiredDropCapabilities: [] +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/agent-secret.yaml b/charts/datadog/datadog/2.4.200/templates/agent-secret.yaml new file mode 100644 index 000000000..33e13d1bb --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-secret.yaml @@ -0,0 +1,23 @@ +{{- if not .Values.clusterAgent.tokenExistingSecret }} +{{- if .Values.clusterAgent.enabled -}} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +type: Opaque +data: + {{ if .Values.clusterAgent.token -}} + token: {{ .Values.clusterAgent.token | b64enc | quote }} + {{ else -}} + token: {{ randAlphaNum 32 | b64enc | quote }} + {{ end }} +{{- end }} + +{{ end }} \ No newline at end of file diff --git a/charts/datadog/datadog/2.4.200/templates/agent-services.yaml b/charts/datadog/datadog/2.4.200/templates/agent-services.yaml new file mode 100644 index 000000000..52954571d --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/agent-services.yaml @@ -0,0 +1,70 @@ +{{- if .Values.clusterAgent.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + type: ClusterIP + selector: + app: {{ template "datadog.fullname" . 
}}-cluster-agent + ports: + - port: 5005 + name: agentport + protocol: TCP +{{ end }} + +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent-metrics-api + labels: + app: "{{ template "datadog.fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + type: {{ .Values.clusterAgent.metricsProvider.service.type }} + selector: + app: {{ template "datadog.fullname" . }}-cluster-agent + ports: + - port: {{ template "clusterAgent.metricsProvider.port" . }} + name: metricsapi + protocol: TCP +{{ end }} + +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.admissionController.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent-admission-controller + labels: + app: "{{ template "datadog.fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + selector: + app: {{ template "datadog.fullname" . }}-cluster-agent + ports: + - port: 443 + targetPort: 8000 +{{ end }} diff --git a/charts/datadog/datadog/2.4.200/templates/checksd-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/checksd-configmap.yaml new file mode 100644 index 000000000..486b5f649 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/checksd-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.datadog.checksd }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-checksd + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }} +data: +{{ tpl (toYaml .Values.datadog.checksd) . | indent 2 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/cluster-agent-confd-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/cluster-agent-confd-configmap.yaml new file mode 100644 index 000000000..b39963055 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/cluster-agent-confd-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.clusterAgent.confd }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent-confd + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . 
}}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + checksum/confd-config: {{ tpl (toYaml .Values.clusterAgent.confd) . | sha256sum }} +data: +{{ tpl (toYaml .Values.clusterAgent.confd) . | indent 2 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/cluster-agent-config-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/cluster-agent-config-configmap.yaml new file mode 100644 index 000000000..6bfc98bde --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/cluster-agent-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if .Values.clusterAgent.datadog_cluster_yaml }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent-config + labels: + app: "{{ template "datadog.fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + checksum/clusteragent-config: {{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | sha256sum }} +data: + datadog-cluster.yaml: | +{{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | indent 4 }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/cluster-agent-deployment.yaml b/charts/datadog/datadog/2.4.200/templates/cluster-agent-deployment.yaml new file mode 100644 index 000000000..2f7d0bd56 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/cluster-agent-deployment.yaml @@ -0,0 +1,245 @@ +{{- if .Values.clusterAgent.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + replicas: {{ .Values.clusterAgent.replicas }} + strategy: +{{- if .Values.clusterAgent.strategy }} +{{ toYaml .Values.clusterAgent.strategy | indent 4 }} +{{- else }} + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 +{{- end }} + selector: + matchLabels: + app: {{ template "datadog.fullname" . }}-cluster-agent + {{- if .Values.clusterAgent.podLabels }} +{{ toYaml .Values.clusterAgent.podLabels | indent 6 }} + {{- end }} + template: + metadata: + labels: + app: {{ template "datadog.fullname" . }}-cluster-agent + {{- if .Values.clusterAgent.podLabels }} +{{ toYaml .Values.clusterAgent.podLabels | indent 8 }} + {{- end }} + name: {{ template "datadog.fullname" . }}-cluster-agent + annotations: + checksum/install_info: {{ printf "%s-%s" .Chart.Name .Chart.Version | sha256sum }} + {{- if .Values.clusterAgent.datadog_cluster_yaml }} + checksum/clusteragent-config: {{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | sha256sum }} + {{- end }} + {{- if .Values.clusterAgent.confd }} + checksum/confd-config: {{ tpl (toYaml .Values.clusterAgent.confd) . 
| sha256sum }} + {{- end }} + ad.datadoghq.com/cluster-agent.check_names: '["prometheus"]' + ad.datadoghq.com/cluster-agent.init_configs: '[{}]' + ad.datadoghq.com/cluster-agent.instances: | + [{ + "prometheus_url": "http://%%host%%:5000/metrics", + "namespace": "datadog.cluster_agent", + "metrics": [ + "go_goroutines", "go_memstats_*", "process_*", + "api_requests", + "datadog_requests", "external_metrics", "rate_limit_queries_*", + "cluster_checks_*" + ] + }] + {{- if .Values.clusterAgent.podAnnotations }} +{{ toYaml .Values.clusterAgent.podAnnotations | indent 8 }} + {{- end }} + + spec: + {{- if .Values.clusterAgent.priorityClassName }} + priorityClassName: "{{ .Values.clusterAgent.priorityClassName }}" + {{- end }} + {{- if .Values.clusterAgent.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.clusterAgent.image.pullSecrets | indent 8 }} + {{- end }} + serviceAccountName: {{ if .Values.clusterAgent.rbac.create }}{{ template "datadog.fullname" . }}-cluster-agent{{ else }}"{{ .Values.clusterAgent.rbac.serviceAccountName }}"{{ end }} + {{- if .Values.clusterAgent.useHostNetwork }} + hostNetwork: {{ .Values.clusterAgent.useHostNetwork }} + dnsPolicy: ClusterFirstWithHostNet + {{- end }} + {{- if .Values.clusterAgent.dnsConfig }} + dnsConfig: +{{ toYaml .Values.clusterAgent.dnsConfig | indent 8 }} + {{- end }} + containers: + - name: cluster-agent + image: "{{ .Values.clusterAgent.image.repository }}:{{ .Values.clusterAgent.image.tag }}" + {{- with .Values.clusterAgent.command }} + command: {{ range . }} + - {{ . | quote }} + {{- end }} + {{- end }} + imagePullPolicy: {{ .Values.clusterAgent.image.pullPolicy }} + resources: +{{ toYaml .Values.clusterAgent.resources | indent 10 }} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + {{- if .Values.clusterAgent.metricsProvider.enabled }} + - containerPort: {{ template "clusterAgent.metricsProvider.port" . }} + name: metricsapi + protocol: TCP + {{- end }} + env: + - name: DD_HEALTH_PORT + value: {{ .Values.clusterAgent.healthPort | quote }} + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "datadog.apiSecretName" . }} + key: api-key + optional: true + {{- if .Values.clusterAgent.metricsProvider.enabled }} + - name: DD_APP_KEY + valueFrom: + secretKeyRef: + name: {{ template "datadog.appKeySecretName" . }} + key: app-key + - name: DD_EXTERNAL_METRICS_PROVIDER_ENABLED + value: {{ .Values.clusterAgent.metricsProvider.enabled | quote }} + - name: DD_EXTERNAL_METRICS_PROVIDER_PORT + value: {{ include "clusterAgent.metricsProvider.port" . | quote }} + - name: DD_EXTERNAL_METRICS_PROVIDER_WPA_CONTROLLER + value: {{ .Values.clusterAgent.metricsProvider.wpaController | quote }} + - name: DD_EXTERNAL_METRICS_PROVIDER_USE_DATADOGMETRIC_CRD + value: {{ .Values.clusterAgent.metricsProvider.useDatadogMetrics | quote }} + {{- end }} + {{- if .Values.clusterAgent.admissionController.enabled }} + - name: DD_ADMISSION_CONTROLLER_ENABLED + value: {{ .Values.clusterAgent.admissionController.enabled | quote }} + - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED + value: {{ .Values.clusterAgent.admissionController.mutateUnlabelled | quote }} + - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME + value: {{ template "datadog.fullname" . 
}}-cluster-agent-admission-controller + {{- end }} + {{- if .Values.datadog.clusterChecks.enabled }} + - name: DD_CLUSTER_CHECKS_ENABLED + value: {{ .Values.datadog.clusterChecks.enabled | quote }} + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + {{- end }} + {{- if .Values.datadog.clusterName }} + {{- if not (regexMatch "^([a-z]([a-z0-9\\-]{0,38}[a-z0-9])?\\.)*([a-z]([a-z0-9\\-]{0,38}[a-z0-9])?)$" .Values.datadog.clusterName) }} + {{- fail "Your `clusterName` isn’t valid. It must be dot-separated tokens where a token start with a lowercase letter followed by up to 39 lowercase letters, numbers, or hyphens and cannot end with a hyphen."}} + {{- end}} + - name: DD_CLUSTER_NAME + value: {{ .Values.datadog.clusterName | quote }} + {{- end }} + {{- if .Values.datadog.site }} + - name: DD_SITE + value: {{ .Values.datadog.site | quote }} + {{- end }} + {{- if .Values.datadog.dd_url }} + - name: DD_DD_URL + value: {{ .Values.datadog.dd_url | quote }} + {{- end }} + {{- if .Values.datadog.logLevel }} + - name: DD_LOG_LEVEL + value: {{ .Values.datadog.logLevel | quote }} + {{- end }} + - name: DD_LEADER_ELECTION + value: {{ default "true" .Values.datadog.leaderElection | quote}} + {{- if .Values.datadog.leaderLeaseDuration }} + - name: DD_LEADER_LEASE_DURATION + value: {{ .Values.datadog.leaderLeaseDuration | quote }} + {{- else if .Values.datadog.clusterChecks.enabled }} + - name: DD_LEADER_LEASE_DURATION + value: "15" + {{- end }} + {{- if .Values.datadog.collectEvents }} + - name: DD_COLLECT_KUBERNETES_EVENTS + value: {{ .Values.datadog.collectEvents | quote}} + {{- end }} + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ template "datadog.fullname" . }}-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ template "clusterAgent.tokenSecretName" . }} + key: token + - name: DD_KUBE_RESOURCES_NAMESPACE + value: {{ .Release.Namespace }} + {{- if .Values.datadog.orchestratorExplorer.enabled }} + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + {{- end }} +{{- if .Values.clusterAgent.env }} +{{ toYaml .Values.clusterAgent.env | indent 10 }} +{{- end }} + livenessProbe: +{{ toYaml .Values.clusterAgent.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.clusterAgent.readinessProbe | indent 10 }} + volumeMounts: + - name: installinfo + subPath: install_info + {{- if eq .Values.targetSystem "windows" }} + mountPath: C:/ProgramData/Datadog/install_info + {{- else }} + mountPath: /etc/datadog-agent/install_info + {{- end }} + readOnly: true +{{- if .Values.clusterAgent.volumeMounts }} +{{ toYaml .Values.clusterAgent.volumeMounts | indent 10 }} +{{- end }} +{{- if .Values.clusterAgent.confd }} + - name: confd + mountPath: /conf.d + readOnly: true +{{- end }} +{{- if .Values.clusterAgent.datadog_cluster_yaml }} + - name: cluster-agent-yaml + mountPath: /etc/datadog-agent/datadog-cluster.yaml + subPath: datadog-cluster.yaml + readOnly: true +{{- end}} + volumes: + - name: installinfo + configMap: + name: {{ template "datadog.fullname" . }}-installinfo +{{- if .Values.clusterAgent.confd }} + - name: confd + configMap: + name: {{ template "datadog.fullname" . }}-cluster-agent-confd +{{- end }} +{{- if .Values.clusterAgent.datadog_cluster_yaml }} + - name: cluster-agent-yaml + configMap: + name: {{ template "datadog.fullname" . 
}}-cluster-agent-config +{{- end}} + +{{- if .Values.clusterAgent.volumes }} +{{ toYaml .Values.clusterAgent.volumes | indent 8 }} +{{- end }} + {{- if .Values.clusterAgent.tolerations }} + tolerations: +{{ toYaml .Values.clusterAgent.tolerations | indent 8 }} + {{- end }} + {{- if .Values.clusterAgent.affinity }} + affinity: +{{ toYaml .Values.clusterAgent.affinity | indent 8 }} + {{- end }} + nodeSelector: + {{ template "label.os" . }}: {{ .Values.targetSystem }} + {{- if .Values.clusterAgent.nodeSelector }} +{{ toYaml .Values.clusterAgent.nodeSelector | indent 8 }} + {{- end }} +{{ end }} diff --git a/charts/datadog/datadog/2.4.200/templates/cluster-agent-pdb.yaml b/charts/datadog/datadog/2.4.200/templates/cluster-agent-pdb.yaml new file mode 100644 index 000000000..3d341f0de --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/cluster-agent-pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.clusterAgent.createPodDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "datadog.fullname" . }}-cluster-agent + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + minAvailable: 1 + selector: + matchLabels: + app: {{ template "datadog.fullname" . }}-cluster-agent +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/cluster-agent-rbac.yaml b/charts/datadog/datadog/2.4.200/templates/cluster-agent-rbac.yaml new file mode 100644 index 000000000..5b722750d --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/cluster-agent-rbac.yaml @@ -0,0 +1,225 @@ +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . 
}}-cluster-agent +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + - nodes + - componentstatuses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: ["quota.openshift.io"] + resources: + - clusterresourcequotas + verbs: + - get + - list +- apiGroups: + - "autoscaling" + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +{{- if .Values.datadog.collectEvents }} +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadogtoken # Kubernetes event collection state + verbs: + - get + - update +{{- end }} +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadog-leader-election # Leader election token +{{- if .Values.clusterAgent.metricsProvider.enabled }} + - datadog-custom-metrics + - extension-apiserver-authentication +{{- end }} + verbs: + - get + - update +- apiGroups: # To create the leader election token and hpa events + - "" + resources: + - configmaps + - events + verbs: + - create +- nonResourceURLs: + - "/version" + - "/healthz" + verbs: + - get +{{- if and .Values.clusterAgent.metricsProvider.enabled .Values.clusterAgent.metricsProvider.wpaController }} +- apiGroups: + - "datadoghq.com" + resources: + - "watermarkpodautoscalers" + verbs: + - "list" + - "get" + - "watch" +{{- end }} +{{- if .Values.datadog.orchestratorExplorer.enabled }} +- apiGroups: # to get the kube-system namespace UID and generate a cluster ID + - "" + resources: + - namespaces + resourceNames: + - "kube-system" + verbs: + - get +- apiGroups: # To create the cluster-id configmap + - "" + resources: + - configmaps + resourceNames: + - "datadog-cluster-id" + verbs: + - create + - get + - update +{{- end }} +{{- if and .Values.clusterAgent.metricsProvider.enabled .Values.clusterAgent.metricsProvider.useDatadogMetrics }} +- apiGroups: + - "datadoghq.com" + resources: + - "datadogmetrics" + verbs: + - "list" + - "create" + - "delete" + - "watch" +- apiGroups: + - "datadoghq.com" + resources: + - "datadogmetrics/status" + verbs: + - "update" +{{- end }} +{{- if .Values.clusterAgent.admissionController.enabled }} +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: ["get", "list", "watch", "update", "create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "update", "create"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get"] +- apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments"] + verbs: ["get"] +{{- end }} +--- +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }}-cluster-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "datadog.fullname" . }}-cluster-agent +subjects: + - kind: ServiceAccount + name: {{ template "datadog.fullname" . }}-cluster-agent + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: "{{ template "datadog.fullname" . 
}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }}-cluster-agent +{{- end }} + +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create .Values.clusterAgent.metricsProvider.enabled }} +--- +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + app: "{{ template "datadog.fullname" . }}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }}-cluster-agent:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: {{ template "datadog.fullname" . }}-cluster-agent + namespace: {{ .Release.Namespace }} +--- +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: "{{ template "datadog.fullname" . }}-cluster-agent" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: {{ template "datadog.fullname" . }}-cluster-agent + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/confd-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/confd-configmap.yaml new file mode 100644 index 000000000..44d64966c --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/confd-configmap.yaml @@ -0,0 +1,26 @@ +{{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-confd + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + checksum/confd-config: {{ tpl (toYaml .Values.datadog.confd) . | sha256sum }} + checksum/autoconf-config: {{ tpl (toYaml .Values.datadog.autoconf) . | sha256sum }} +data: +{{/* +Merge the legacy autoconf dict before so confd static configurations +override duplicates +*/}} +{{- if .Values.datadog.autoconf }} +{{ tpl (toYaml .Values.datadog.autoconf) . 
| indent 2 }} +{{- end }} +{{- if .Values.datadog.confd }} +{{ tpl (toYaml .Values.datadog.confd) . | indent 2 }} +{{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/container-agent.yaml b/charts/datadog/datadog/2.4.200/templates/container-agent.yaml new file mode 100644 index 000000000..99ac7e2a2 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/container-agent.yaml @@ -0,0 +1,161 @@ +{{- define "container-agent" -}} +- name: agent + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["agent", "run"] + resources: +{{ toYaml .Values.agents.containers.agent.resources | indent 4 }} + ports: + - containerPort: {{ .Values.datadog.dogstatsd.port }} + {{- if .Values.datadog.dogstatsd.useHostPort }} + hostPort: {{ .Values.datadog.dogstatsd.port }} + {{- end }} + name: dogstatsdport + protocol: UDP + env: + {{- include "containers-common-env" . | nindent 4 }} + {{- if .Values.datadog.logLevel }} + - name: DD_LOG_LEVEL + value: {{ .Values.agents.containers.agent.logLevel | default .Values.datadog.logLevel | quote }} + {{- end }} + {{- if .Values.datadog.dogstatsd.port }} + - name: DD_DOGSTATSD_PORT + value: {{ .Values.datadog.dogstatsd.port | quote }} + {{- end }} + {{- if .Values.datadog.dogstatsd.nonLocalTraffic }} + - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC + value: {{ .Values.datadog.dogstatsd.nonLocalTraffic | quote }} + {{- end }} + {{- if .Values.datadog.dogstatsd.originDetection }} + - name: DD_DOGSTATSD_ORIGIN_DETECTION + value: {{ .Values.datadog.dogstatsd.originDetection | quote }} + {{- end }} + {{- if not .Values.clusterAgent.enabled }} + {{- if .Values.datadog.leaderElection }} + - name: DD_LEADER_ELECTION + value: {{ .Values.datadog.leaderElection | quote}} + {{- end }} + {{- if .Values.datadog.leaderLeaseDuration }} + - name: DD_LEADER_LEASE_DURATION + value: {{ .Values.datadog.leaderLeaseDuration | quote }} + {{- end }} + {{- if .Values.datadog.collectEvents }} + - name: DD_COLLECT_KUBERNETES_EVENTS + value: {{.Values.datadog.collectEvents | quote}} + {{- end }} + {{- else }} + - name: DD_CLUSTER_AGENT_ENABLED + value: {{ .Values.clusterAgent.enabled | quote }} + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ template "datadog.fullname" . }}-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ template "clusterAgent.tokenSecretName" . 
}} + key: token + {{- end }} + - name: DD_APM_ENABLED + value: "false" + - name: DD_LOGS_ENABLED + value: {{ (default false (or .Values.datadog.logs.enabled .Values.datadog.logsEnabled)) | quote}} + - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL + value: {{ (default false (or .Values.datadog.logs.containerCollectAll .Values.datadog.logsConfigContainerCollectAll)) | quote}} + - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE + value: {{ .Values.datadog.logs.containerCollectUsingFiles | quote }} + {{- if not .Values.datadog.livenessProbe }} + - name: DD_HEALTH_PORT + value: "5555" + {{- end }} + {{- if .Values.datadog.dogstatsd.useSocketVolume }} + - name: DD_DOGSTATSD_SOCKET + value: {{ .Values.datadog.dogstatsd.socketPath | quote }} + {{- end }} + {{- if .Values.datadog.clusterChecks.enabled }} + {{- if .Values.clusterChecksRunner.enabled }} + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "endpointschecks" + {{ else }} + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks endpointschecks" + {{- end }} + {{- end }} +{{- if .Values.agents.containers.agent.env }} +{{ toYaml .Values.agents.containers.agent.env | indent 4 }} +{{- end }} + volumeMounts: + - name: installinfo + subPath: install_info + {{- if eq .Values.targetSystem "windows" }} + mountPath: C:/ProgramData/Datadog/install_info + {{- else }} + mountPath: /etc/datadog-agent/install_info + {{- end }} + readOnly: true + - name: config + mountPath: {{ template "datadog.confPath" . }} + {{- if eq .Values.targetSystem "linux" }} + - name: runtimesocketdir + mountPath: {{ print "/host/" (dir (include "datadog.dockerOrCriSocketPath" .)) | clean }} + readOnly: true + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + - name: runtimesocket + mountPath: {{ template "datadog.dockerOrCriSocketPath" . }} + {{- end }} + {{- if .Values.agents.useConfigMap }} + - name: {{ template "datadog.fullname" . }}-datadog-yaml + mountPath: {{ template "datadog.confPath" . 
}}/datadog.yaml + subPath: datadog.yaml + {{- end }} + {{- if eq .Values.targetSystem "linux" }} + {{- if .Values.datadog.dogstatsd.useSocketVolume }} + - name: dsdsocket + mountPath: {{ (dir .Values.datadog.dogstatsd.socketPath) }} + {{- end }} + {{- if .Values.datadog.systemProbe.enabled }} + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + readOnly: true + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + {{- end }} + - name: procdir + mountPath: /host/proc + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + {{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }} + - name: pointerdir + mountPath: /opt/datadog-agent/run + - name: logpodpath + mountPath: /var/log/pods + readOnly: true + {{- if not .Values.datadog.criSocketPath }} + - name: logdockercontainerpath + mountPath: /var/lib/docker/containers + readOnly: true + {{- end }} + {{- end }} + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + {{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }} + - name: pointerdir + mountPath: C:/var/log + - name: logpodpath + mountPath: C:/var/log/pods + readOnly: true + - name: logdockercontainerpath + mountPath: C:/ProgramData/docker/containers + readOnly: true + {{- end }} + {{- end }} +{{- if .Values.agents.volumeMounts }} +{{ toYaml .Values.agents.volumeMounts | indent 4 }} +{{- end }} + livenessProbe: +{{ toYaml .Values.agents.containers.agent.livenessProbe | indent 4 }} + readinessProbe: +{{ toYaml .Values.agents.containers.agent.readinessProbe | indent 4 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/container-process-agent.yaml b/charts/datadog/datadog/2.4.200/templates/container-process-agent.yaml new file mode 100644 index 000000000..d0c60c54f --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/container-process-agent.yaml @@ -0,0 +1,72 @@ +{{- define "container-process-agent" -}} +- name: process-agent + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + {{- if eq .Values.targetSystem "linux" }} + command: ["process-agent", "-config={{ template "datadog.confPath" . }}/datadog.yaml"] + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + command: ["process-agent", "-foreground", "-config={{ template "datadog.confPath" . }}/datadog.yaml"] + {{- end }} + resources: +{{ toYaml .Values.agents.containers.processAgent.resources | indent 4 }} + env: + {{- include "containers-common-env" . | nindent 4 }} + {{- if .Values.datadog.processAgent.processCollection }} + - name: DD_PROCESS_AGENT_ENABLED + value: "true" + {{- end }} + - name: DD_LOG_LEVEL + value: {{ .Values.agents.containers.processAgent.logLevel | default .Values.datadog.logLevel | quote }} + {{- if .Values.datadog.systemProbe.enabled }} + - name: DD_SYSTEM_PROBE_ENABLED + value: {{ .Values.datadog.systemProbe.enabled | quote }} + {{- end }} + {{- if .Values.datadog.orchestratorExplorer.enabled }} + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_CLUSTER_ID + valueFrom: + configMapKeyRef: + name: datadog-cluster-id + key: id + {{- end }} +{{- if .Values.agents.containers.processAgent.env }} +{{ toYaml .Values.agents.containers.processAgent.env | indent 4 }} +{{- end }} + volumeMounts: + - name: config + mountPath: {{ template "datadog.confPath" . 
}} + {{- if eq .Values.targetSystem "linux" }} + - name: runtimesocketdir + mountPath: {{ print "/host/" (dir (include "datadog.dockerOrCriSocketPath" .)) | clean }} + readOnly: true + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + - name: runtimesocket + mountPath: {{ template "datadog.dockerOrCriSocketPath" . }} + {{- end }} + {{- if .Values.agents.useConfigMap }} + - name: {{ template "datadog.fullname" . }}-datadog-yaml + mountPath: {{ template "datadog.confPath" . }}/datadog.yaml + subPath: datadog.yaml + {{- end }} + {{- if eq .Values.targetSystem "linux" }} + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + - name: passwd + mountPath: /etc/passwd + - name: procdir + mountPath: /host/proc + readOnly: true + {{- if .Values.datadog.systemProbe.enabled }} + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + readOnly: true + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/container-system-probe.yaml b/charts/datadog/datadog/2.4.200/templates/container-system-probe.yaml new file mode 100644 index 000000000..e56b3f6b6 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/container-system-probe.yaml @@ -0,0 +1,33 @@ +{{- define "container-system-probe" -}} +- name: system-probe + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + securityContext: + capabilities: + add: ["SYS_ADMIN", "SYS_RESOURCE", "SYS_PTRACE", "NET_ADMIN", "IPC_LOCK"] + command: ["/opt/datadog-agent/embedded/bin/system-probe", "--config=/etc/datadog-agent/system-probe.yaml"] + env: + - name: DD_LOG_LEVEL + value: {{ .Values.agents.containers.systemProbe.logLevel | default .Values.datadog.logLevel | quote }} +{{- if .Values.agents.containers.systemProbe.env }} +{{ toYaml .Values.agents.containers.systemProbe.env | indent 4 }} +{{- end }} + resources: +{{ toYaml .Values.agents.containers.systemProbe.resources | indent 4 }} + volumeMounts: + - name: debugfs + mountPath: /sys/kernel/debug + - name: sysprobe-config + mountPath: /etc/datadog-agent + - name: sysprobe-socket-dir + mountPath: /var/run/sysprobe + - name: procdir + mountPath: /host/proc + readOnly: true + - name: modules + mountPath: /lib/modules + readOnly: true + - name: src + mountPath: /usr/src + readOnly: true +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/container-trace-agent.yaml b/charts/datadog/datadog/2.4.200/templates/container-trace-agent.yaml new file mode 100644 index 000000000..ff64c5113 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/container-trace-agent.yaml @@ -0,0 +1,58 @@ +{{- define "container-trace-agent" -}} +- name: trace-agent + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + {{- if eq .Values.targetSystem "linux" }} + command: ["trace-agent", "-config={{ template "datadog.confPath" . }}/datadog.yaml"] + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + command: ["trace-agent", "-foreground", "-config={{ template "datadog.confPath" . }}/datadog.yaml"] + {{- end }} + resources: +{{ toYaml .Values.agents.containers.traceAgent.resources | indent 4 }} + ports: + - containerPort: {{ .Values.datadog.apm.port }} + hostPort: {{ .Values.datadog.apm.port }} + name: traceport + protocol: TCP + env: + {{- include "containers-common-env" . 
| nindent 4 }} + - name: DD_LOG_LEVEL + value: {{ .Values.agents.containers.traceAgent.logLevel | default .Values.datadog.logLevel | quote }} + - name: DD_APM_ENABLED + value: "true" + - name: DD_APM_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_APM_RECEIVER_PORT + value: {{ .Values.datadog.apm.port | quote }} + {{- if .Values.datadog.apm.useSocketVolume }} + - name: DD_APM_RECEIVER_SOCKET + value: {{ .Values.datadog.apm.socketPath | quote }} + {{- end }} +{{- if .Values.agents.containers.traceAgent.env }} +{{ toYaml .Values.agents.containers.traceAgent.env | indent 4 }} +{{- end }} + volumeMounts: + - name: config + mountPath: {{ template "datadog.confPath" . }} + {{- if .Values.agents.useConfigMap }} + - name: {{ template "datadog.fullname" . }}-datadog-yaml + mountPath: {{ template "datadog.confPath" . }}/datadog.yaml + subPath: datadog.yaml + {{- end }} + {{- if eq .Values.targetSystem "linux" }} + - name: runtimesocketdir + mountPath: {{ print "/host/" (dir (include "datadog.dockerOrCriSocketPath" .)) | clean }} + readOnly: true + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + - name: runtimesocket + mountPath: {{ template "datadog.dockerOrCriSocketPath" . }} + {{- end }} + {{- if .Values.datadog.apm.useSocketVolume }} + - name: apmsocket + mountPath: {{ (dir .Values.datadog.apm.socketPath) }} + {{- end }} + livenessProbe: +{{ toYaml .Values.agents.containers.traceAgent.livenessProbe | indent 4 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/containers-common-env.yaml b/charts/datadog/datadog/2.4.200/templates/containers-common-env.yaml new file mode 100644 index 000000000..d8dd0fba6 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/containers-common-env.yaml @@ -0,0 +1,95 @@ +# The purpose of this template is to define a minimal set of environment +# variables required to operate dedicated containers in the daemonset +{{- define "containers-common-env" -}} +- name: DD_API_KEY + valueFrom: + secretKeyRef: + name: {{ template "datadog.apiSecretName" . }} + key: api-key +{{- if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion }} +- name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP +{{- end }} +{{- if .Values.datadog.clusterName }} +{{- if not (regexMatch "^([a-z]([a-z0-9\\-]{0,38}[a-z0-9])?\\.)*([a-z]([a-z0-9\\-]{0,38}[a-z0-9])?)$" .Values.datadog.clusterName) }} +{{- fail "Your `clusterName` isn’t valid. 
It must be dot-separated tokens where a token start with a lowercase letter followed by up to 39 lowercase letters, numbers, or hyphens and cannot end with a hyphen."}} +{{- end}} +- name: DD_CLUSTER_NAME + value: {{ .Values.datadog.clusterName | quote }} +{{- end }} +{{- if .Values.datadog.tags }} +- name: DD_TAGS + value: {{ .Values.datadog.tags | join " " | quote }} +{{- end }} +{{- if .Values.datadog.nodeLabelsAsTags }} +- name: DD_KUBERNETES_NODE_LABELS_AS_TAGS + value: '{{ toJson .Values.datadog.nodeLabelsAsTags }}' +{{- end }} +{{- if .Values.datadog.podLabelsAsTags }} +- name: DD_KUBERNETES_POD_LABELS_AS_TAGS + value: '{{ toJson .Values.datadog.podLabelsAsTags }}' +{{- end }} +{{- if .Values.datadog.podAnnotationsAsTags }} +- name: DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS + value: '{{ toJson .Values.datadog.podAnnotationsAsTags }}' +{{- end }} +- name: KUBERNETES + value: "yes" +{{- if .Values.datadog.site }} +- name: DD_SITE + value: {{ .Values.datadog.site | quote }} +{{- end }} +{{- if .Values.datadog.dd_url }} +- name: DD_DD_URL + value: {{ .Values.datadog.dd_url | quote }} +{{- end }} +{{- if .Values.datadog.env }} +{{ toYaml .Values.datadog.env }} +{{- end }} +{{- if .Values.datadog.acInclude }} +- name: DD_AC_INCLUDE + value: {{ .Values.datadog.acInclude | quote }} +{{- end }} +{{- if .Values.datadog.acExclude }} +- name: DD_AC_EXCLUDE + value: {{ .Values.datadog.acExclude | quote }} +{{- end }} +{{- if .Values.datadog.containerInclude }} +- name: DD_CONTAINER_INCLUDE + value: {{ .Values.datadog.containerInclude | quote }} +{{- end }} +{{- if .Values.datadog.containerExclude }} +- name: DD_CONTAINER_EXCLUDE + value: {{ .Values.datadog.containerExclude | quote }} +{{- end }} +{{- if .Values.datadog.containerIncludeMetrics }} +- name: DD_CONTAINER_INCLUDE_METRICS + value: {{ .Values.datadog.containerIncludeMetrics | quote }} +{{- end }} +{{- if .Values.datadog.containerExcludeMetrics }} +- name: DD_CONTAINER_EXCLUDE_METRICS + value: {{ .Values.datadog.containerExcludeMetrics | quote }} +{{- end }} +{{- if .Values.datadog.containerIncludeLogs }} +- name: DD_CONTAINER_INCLUDE_LOGS + value: {{ .Values.datadog.containerIncludeLogs | quote }} +{{- end }} +{{- if .Values.datadog.containerExcludeLogs }} +- name: DD_CONTAINER_EXCLUDE_LOGS + value: {{ .Values.datadog.containerExcludeLogs | quote }} +{{- end }} +{{- if .Values.datadog.criSocketPath }} +- name: DD_CRI_SOCKET_PATH + value: {{ print "/host/" .Values.datadog.criSocketPath | clean }} +{{- else }} +- name: DOCKER_HOST +{{- if eq .Values.targetSystem "linux" }} + value: unix://{{ print "/host/" (include "datadog.dockerOrCriSocketPath" .) | clean }} +{{- end }} +{{- if eq .Values.targetSystem "windows" }} + value: npipe://{{ (include "datadog.dockerOrCriSocketPath" .) 
| replace "\\" "/" }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/containers-init-linux.yaml b/charts/datadog/datadog/2.4.200/templates/containers-init-linux.yaml new file mode 100644 index 000000000..bfb0ef1ea --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/containers-init-linux.yaml @@ -0,0 +1,51 @@ +{{- define "containers-init-linux" -}} +- name: init-volume + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }} +- name: init-config + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["bash", "-c"] + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + {{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }} + - name: confd + mountPath: /conf.d + readOnly: true + {{- end }} + {{- if .Values.datadog.checksd }} + - name: checksd + mountPath: /checks.d + readOnly: true + {{- end }} + - name: procdir + mountPath: /host/proc + readOnly: true + - name: runtimesocketdir + mountPath: {{ print "/host/" (dir (include "datadog.dockerOrCriSocketPath" .)) | clean }} + readOnly: true + {{- if .Values.datadog.systemProbe.enabled }} + - name: sysprobe-config + mountPath: /etc/datadog-agent/system-probe.yaml + subPath: system-probe.yaml + {{- end }} + env: + {{- include "containers-common-env" . | nindent 4 }} + {{- if and (not .Values.clusterAgent.enabled) .Values.datadog.leaderElection }} + - name: DD_LEADER_ELECTION + value: {{ .Values.datadog.leaderElection | quote }} + {{- end }} + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/containers-init-windows.yaml b/charts/datadog/datadog/2.4.200/templates/containers-init-windows.yaml new file mode 100644 index 000000000..e5ae9ec88 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/containers-init-windows.yaml @@ -0,0 +1,38 @@ +{{- define "containers-init-windows" -}} +- name: init-volume + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["pwsh", "-Command"] + args: + - Copy-Item -Recurse -Force {{ template "datadog.confPath" . }} C:/Temp + volumeMounts: + - name: config + mountPath: C:/Temp/Datadog + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }} +- name: init-config + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + imagePullPolicy: {{ .Values.agents.image.pullPolicy }} + command: ["pwsh", "-Command"] + args: + - Get-ChildItem 'entrypoint-ps1' | ForEach-Object { & $_.FullName if (-Not $?) { exit 1 } } + volumeMounts: + - name: config + mountPath: {{ template "datadog.confPath" . 
}} + {{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }} + - name: confd + mountPath: C:/conf.d + readOnly: true + {{- end }} + {{- if .Values.datadog.checksd }} + - name: checksd + mountPath: C:/checks.d + readOnly: true + {{- end }} + - name: runtimesocket + mountPath: {{ template "datadog.dockerOrCriSocketPath" . }} + env: + {{- include "containers-common-env" . | nindent 4 }} + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-linux.yaml b/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-linux.yaml new file mode 100644 index 000000000..17529c31b --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-linux.yaml @@ -0,0 +1,69 @@ +{{- define "daemonset-volumes-linux" -}} +- hostPath: + path: /proc + name: procdir +- hostPath: + path: /sys/fs/cgroup + name: cgroups +{{- if .Values.datadog.dogstatsd.useSocketVolume }} +- hostPath: + path: {{ .Values.datadog.dogstatsd.hostSocketPath }} + type: DirectoryOrCreate + name: dsdsocket +{{- end }} +{{- if .Values.datadog.apm.useSocketVolume }} +- hostPath: + path: {{ .Values.datadog.apm.hostSocketPath }} + type: DirectoryOrCreate + name: apmsocket +{{- end }} +- name: s6-run + emptyDir: {} +{{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }} +- name: confd + configMap: + name: {{ template "datadog.fullname" . }}-confd +{{- end }} +{{- if .Values.datadog.systemProbe.enabled }} +- name: sysprobe-config + configMap: + name: {{ template "datadog.fullname" . }}-system-probe-config +{{- if eq .Values.datadog.systemProbe.seccomp "localhost/system-probe" }} +- name: datadog-agent-security + configMap: + name: {{ template "datadog.fullname" . }}-security +- hostPath: + path: {{ .Values.datadog.systemProbe.seccompRoot }} + name: seccomp-root +{{- end }} +- hostPath: + path: /sys/kernel/debug + name: debugfs +- name: sysprobe-socket-dir + emptyDir: {} +- hostPath: + path: /lib/modules + name: modules +- hostPath: + path: /usr/src + name: src +{{- end }} +{{- if or .Values.datadog.processAgent.enabled .Values.datadog.systemProbe.enabled }} +- hostPath: + path: /etc/passwd + name: passwd +{{- end }} +{{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }} +- hostPath: + path: "/var/lib/datadog-agent/logs" + name: pointerdir +- hostPath: + path: /var/log/pods + name: logpodpath +{{- if not .Values.datadog.criSocketPath }} +- hostPath: + path: /var/lib/docker/containers + name: logdockercontainerpath +{{- end }} +{{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-windows.yaml b/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-windows.yaml new file mode 100644 index 000000000..eb9e0fd52 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/daemonset-volumes-windows.yaml @@ -0,0 +1,13 @@ +{{- define "daemonset-volumes-windows" -}} +{{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }} +- hostPath: + path: C:/var/log + name: pointerdir +- hostPath: + path: C:/var/log/pods + name: logpodpath +- hostPath: + path: C:/ProgramData/docker/containers + name: logdockercontainerpath +{{- end }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/daemonset.yaml b/charts/datadog/datadog/2.4.200/templates/daemonset.yaml new file mode 100644 index 000000000..5f3e3a90b --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/daemonset.yaml @@ -0,0 +1,150 @@ +{{- template "check-version" . 
}} +{{- if .Values.agents.enabled }} +{{- if (or (.Values.datadog.apiKeyExistingSecret) (.Values.datadog.apiKey)) }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "datadog.fullname" . }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +spec: + selector: + matchLabels: + app: {{ template "datadog.fullname" . }} + {{- if .Values.agents.podLabels }} +{{ toYaml .Values.agents.podLabels | indent 6 }} + {{- end }} + template: + metadata: + labels: + app: {{ template "datadog.fullname" . }} + {{- if .Values.agents.podLabels }} +{{ toYaml .Values.agents.podLabels | indent 8 }} + {{- end }} + name: {{ template "datadog.fullname" . }} + annotations: + checksum/install_info: {{ printf "%s-%s" .Chart.Name .Chart.Version | sha256sum }} + checksum/autoconf-config: {{ tpl (toYaml .Values.datadog.autoconf) . | sha256sum }} + checksum/confd-config: {{ tpl (toYaml .Values.datadog.confd) . | sha256sum }} + checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }} + {{- if .Values.agents.customAgentConfig }} + checksum/agent-config: {{ tpl (toYaml .Values.agents.customAgentConfig) . | sha256sum }} + {{- end }} + {{- if .Values.datadog.systemProbe.enabled }} + container.apparmor.security.beta.kubernetes.io/system-probe: {{ .Values.datadog.systemProbe.apparmor }} + container.seccomp.security.alpha.kubernetes.io/system-probe: {{ .Values.datadog.systemProbe.seccomp }} + {{- end }} + {{- if .Values.agents.podAnnotations }} +{{ toYaml .Values.agents.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.datadog.securityContext }} + securityContext: +{{ toYaml .Values.datadog.securityContext| indent 8 }} + {{- else if or .Values.agents.podSecurity.podSecurityPolicy.create .Values.agents.podSecurity.securityContextConstraints.create -}} + {{- if and (.Values.agents.podSecurity.securityContext) .Values.agents.podSecurity.securityContext.seLinuxOptions }} + securityContext: + seLinuxOptions: +{{ toYaml .Values.agents.podSecurity.securityContext.seLinuxOptions | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.agents.useHostNetwork }} + hostNetwork: {{ .Values.agents.useHostNetwork }} + dnsPolicy: ClusterFirstWithHostNet + {{- end }} + {{- if .Values.agents.dnsConfig }} + dnsConfig: +{{ toYaml .Values.agents.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.datadog.dogstatsd.useHostPID }} + hostPID: {{ .Values.datadog.dogstatsd.useHostPID }} + {{- end }} + {{- if .Values.agents.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.agents.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.agents.priorityClassName }} + priorityClassName: {{ .Values.agents.priorityClassName }} + {{- end }} + containers: + {{- include "container-agent" . | nindent 6 }} + {{- if .Values.datadog.apm.enabled }} + {{- include "container-trace-agent" . | nindent 6 }} + {{- end }} + {{- if .Values.datadog.processAgent.enabled }} + {{- include "container-process-agent" . | nindent 6 }} + {{- end }} + {{- if .Values.datadog.systemProbe.enabled }} + {{- include "container-system-probe" . | nindent 6 }} + {{- end }} + initContainers: + {{- if eq .Values.targetSystem "windows" }} + {{ include "containers-init-windows" . 
| nindent 6 }} + {{- end }} + {{- if eq .Values.targetSystem "linux" }} + {{ include "containers-init-linux" . | nindent 6 }} + {{- end }} + {{- if and .Values.datadog.systemProbe.enabled (eq .Values.datadog.systemProbe.seccomp "localhost/system-probe") }} + {{ include "system-probe-init" . | nindent 6 }} + {{- end }} + volumes: + - name: installinfo + configMap: + name: {{ template "datadog.fullname" . }}-installinfo + - name: config + emptyDir: {} + {{- if eq .Values.targetSystem "linux" }} + - hostPath: + path: {{ dir (include "datadog.dockerOrCriSocketPath" .) }} + name: runtimesocketdir + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + - hostPath: + path: {{ template "datadog.dockerOrCriSocketPath" . }} + name: runtimesocket + {{- end }} + {{- if .Values.datadog.checksd }} + - name: checksd + configMap: + name: {{ template "datadog.fullname" . }}-checksd + {{- end }} + {{- if .Values.agents.useConfigMap }} + - name: {{ template "datadog.fullname" . }}-datadog-yaml + configMap: + name: {{ template "datadog.fullname" . }}-datadog-yaml + {{- end }} + {{- if eq .Values.targetSystem "windows" }} + {{ include "daemonset-volumes-windows" . | nindent 6 }} + {{- end }} + {{- if eq .Values.targetSystem "linux" }} + {{ include "daemonset-volumes-linux" . | nindent 6 }} + {{- end }} +{{- if .Values.agents.volumes }} +{{ toYaml .Values.agents.volumes | indent 6 }} +{{- end }} + tolerations: + {{- if eq .Values.targetSystem "windows" }} + - effect: NoSchedule + key: node.kubernetes.io/os + value: windows + operator: Equal + {{- end }} + {{- if .Values.agents.tolerations }} +{{ toYaml .Values.agents.tolerations | indent 6 }} + {{- end }} + affinity: +{{ toYaml .Values.agents.affinity | indent 8 }} + serviceAccountName: {{ if .Values.agents.rbac.create }}{{ template "datadog.fullname" . }}{{ else }}"{{ .Values.agents.rbac.serviceAccountName }}"{{ end }} + nodeSelector: + {{ template "label.os" . }}: {{ .Values.targetSystem }} + {{- if .Values.agents.nodeSelector }} +{{ toYaml .Values.agents.nodeSelector | indent 8 }} + {{- end }} + updateStrategy: +{{ toYaml .Values.agents.updateStrategy | indent 4 }} +{{ end }} +{{ end }} diff --git a/charts/datadog/datadog/2.4.200/templates/datadog-yaml-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/datadog-yaml-configmap.yaml new file mode 100644 index 000000000..cfb827dfe --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/datadog-yaml-configmap.yaml @@ -0,0 +1,51 @@ +{{- if .Values.agents.useConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-datadog-yaml + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + {{- if .Values.agents.customAgentConfig }} + checksum/agent-config: {{ tpl (toYaml .Values.agents.customAgentConfig) . | sha256sum }} + {{- end }} +data: + datadog.yaml: | + {{- if .Values.agents.customAgentConfig }} +{{ tpl (toYaml .Values.agents.customAgentConfig) . 
| indent 4 }} + {{- else }} + ## Provides autodetected defaults, for kubernetes environments, + ## please see datadog.yaml.example for all supported options + + # Autodiscovery for Kubernetes + listeners: + - name: kubelet + config_providers: + - name: kubelet + polling: true + + # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration + apm_config: + enabled: true + apm_non_local_traffic: true + max_memory: 0 + max_cpu_percent: 0 + + {{- $version := (.Values.agents.image.tag | toString | trimSuffix "-jmx") }} + {{- $length := len (split "." $version ) -}} + {{- if and (eq $length 1) (ge $version "6") -}} + {{- $version := "6.15" }} + {{- end -}} + {{ if semverCompare ">=6.15" $version }} + # Enable java container awareness (agent version >= 6.15) + jmx_use_container_support: true + {{ else }} + # Enable java cgroup memory awareness (agent version < 6.15) + jmx_use_cgroup_memory_limit: true + {{ end }} + {{- end }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/hpa-external-metrics-rbac.yaml b/charts/datadog/datadog/2.4.200/templates/hpa-external-metrics-rbac.yaml new file mode 100644 index 000000000..4ba6c9557 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/hpa-external-metrics-rbac.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create .Values.clusterAgent.metricsProvider.enabled .Values.clusterAgent.metricsProvider.createReaderRbac -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- if contains "-gke." .Capabilities.KubeVersion.GitVersion }} + name: external-metrics-reader +{{- else }} + name: {{ template "datadog.fullname" . }}-cluster-agent-external-metrics-reader +{{- end }} +rules: +- apiGroups: + - "external.metrics.k8s.io" + resources: + - "*" + verbs: + - list + - get + - watch +--- +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- if contains "-gke." .Capabilities.KubeVersion.GitVersion }} + name: external-metrics-reader +{{- else }} + name: {{ template "datadog.fullname" . }}-cluster-agent-external-metrics-reader +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if contains "-gke." .Capabilities.KubeVersion.GitVersion }} + name: external-metrics-reader +{{- else }} + name: {{ template "datadog.fullname" . 
}}-cluster-agent-external-metrics-reader +{{- end }} +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/install_info-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/install_info-configmap.yaml new file mode 100644 index 000000000..52059f8ae --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/install_info-configmap.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-installinfo + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + annotations: + checksum/install_info: {{ printf "%s-%s" .Chart.Name .Chart.Version | sha256sum }} +data: + install_info: | + --- + install_method: + tool: helm + tool_version: {{ .Release.Service }} + installer_version: {{ .Chart.Name }}-{{ .Chart.Version }} diff --git a/charts/datadog/datadog/2.4.200/templates/rbac.yaml b/charts/datadog/datadog/2.4.200/templates/rbac.yaml new file mode 100644 index 000000000..74a5b044c --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/rbac.yaml @@ -0,0 +1,116 @@ +{{- if .Values.agents.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }} +rules: +{{- if not .Values.clusterAgent.enabled }} +- apiGroups: + - "" + resources: + - services + - events + - endpoints + - pods + - nodes + - componentstatuses + verbs: + - get + - list + - watch +- apiGroups: ["quota.openshift.io"] + resources: + - clusterresourcequotas + verbs: + - get + - list +{{- if .Values.datadog.collectEvents }} +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadogtoken # Kubernetes event collection state + verbs: + - get + - update +{{- end }} +{{- if .Values.datadog.leaderElection }} +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadog-leader-election # Leader election token + verbs: + - get + - update +- apiGroups: # To create the leader election token + - "" + resources: + - configmaps + verbs: + - create +{{- end }} +- nonResourceURLs: + - "/version" + - "/healthz" + verbs: + - get +{{- end }} +- nonResourceURLs: + - "/metrics" + verbs: + - get +- apiGroups: # Kubelet connectivity + - "" + resources: + - nodes/metrics + - nodes/spec + - nodes/proxy + - nodes/stats + verbs: + - get +- apiGroups: # leader election check + - "" + resources: + - endpoints + verbs: + - get +--- +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "datadog.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "datadog.fullname" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + name: {{ template "datadog.fullname" . }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/templates/secrets.yaml b/charts/datadog/datadog/2.4.200/templates/secrets.yaml new file mode 100644 index 000000000..c386893bb --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/secrets.yaml @@ -0,0 +1,38 @@ +# API Key +{{- if not .Values.datadog.apiKeyExistingSecret }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "datadog.fullname" . }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +type: Opaque +data: + api-key: {{ default "MISSING" .Values.datadog.apiKey | b64enc | quote }} + +{{- end }} + +# APP Key +{{- if not .Values.datadog.appKeyExistingSecret }} +{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "datadog.appKeySecretName" . }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +type: Opaque +data: + app-key: {{ default "MISSING" .Values.datadog.appKey | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/system-probe-configmap.yaml b/charts/datadog/datadog/2.4.200/templates/system-probe-configmap.yaml new file mode 100644 index 000000000..40059e149 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/system-probe-configmap.yaml @@ -0,0 +1,218 @@ +{{- if .Values.datadog.systemProbe.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-system-probe-config + namespace: {{ $.Release.Namespace }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . 
}}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +data: + system-probe.yaml: | + system_probe_config: + enabled: {{ $.Values.datadog.systemProbe.enabled }} + debug_port: {{ $.Values.datadog.systemProbe.debugPort }} + sysprobe_socket: /var/run/sysprobe/sysprobe.sock + enable_conntrack: {{ $.Values.datadog.systemProbe.enableConntrack }} + bpf_debug: {{ $.Values.datadog.systemProbe.bpfDebug }} + enable_tcp_queue_length: {{ $.Values.datadog.systemProbe.enableTCPQueueLength }} + enable_oom_kill: {{ $.Values.datadog.systemProbe.enableOOMKill }} + collect_dns_stats: {{ $.Values.datadog.systemProbe.collectDNSStats }} + +{{- if eq .Values.datadog.systemProbe.seccomp "localhost/system-probe" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "datadog.fullname" . }}-security + namespace: {{ $.Release.Namespace }} + labels: + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/name: "{{ template "datadog.fullname" . }}" + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +data: + system-probe-seccomp.json: | + { + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "names": [ + "accept4", + "access", + "arch_prctl", + "bind", + "bpf", + "brk", + "capget", + "capset", + "chdir", + "clock_gettime", + "clone", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait", + "epoll_wait_old", + "execve", + "execveat", + "exit", + "exit_group", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fstat", + "fstat64", + "fstatfs", + "fsync", + "futex", + "getcwd", + "getdents", + "getdents64", + "getegid", + "geteuid", + "getgid", + "getpeername", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "gettid", + "gettimeofday", + "getuid", + "getxattr", + "ioctl", + "ipc", + "listen", + "lseek", + "lstat", + "lstat64", + "madvise", + "mkdir", + "mkdirat", + "mmap", + "mmap2", + "mprotect", + "mremap", + "munmap", + "nanosleep", + "newfstatat", + "open", + "openat", + "pause", + "perf_event_open", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "prlimit64", + "pselect6", + "read", + "readlink", + "readlinkat", + "recvfrom", + "recvmmsg", + "recvmsg", + "rename", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_yield", + "seccomp", + "select", + "semtimedop", + "send", + "sendmmsg", + "sendmsg", + "sendto", + "set_robust_list", + "set_tid_address", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setns", + "setrlimit", + "setsid", + "setsidaccept4", + "setsockopt", + "setuid", + "setuid32", + "sigaltstack", + "socket", + "socketcall", + "socketpair", + "stat", + "stat64", + "statfs", + "sysinfo", + "umask", + "uname", + "unlink", + "unlinkat", + "wait4", + "waitid", + "waitpid", + "write" + ], + "action": "SCMP_ACT_ALLOW", + "args": null + }, + { + "names": [ 
+ "setns" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 1073741824, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + } + ] + } +{{- end }} +{{- end }} diff --git a/charts/datadog/datadog/2.4.200/templates/system-probe-init.yaml b/charts/datadog/datadog/2.4.200/templates/system-probe-init.yaml new file mode 100644 index 000000000..3507c7192 --- /dev/null +++ b/charts/datadog/datadog/2.4.200/templates/system-probe-init.yaml @@ -0,0 +1,15 @@ +{{- define "system-probe-init" -}} +- name: seccomp-setup + image: "{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}" + command: + - cp + - /etc/config/system-probe-seccomp.json + - /host/var/lib/kubelet/seccomp/system-probe + volumeMounts: + - name: datadog-agent-security + mountPath: /etc/config + - name: seccomp-root + mountPath: /host/var/lib/kubelet/seccomp + resources: +{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }} +{{- end -}} diff --git a/charts/datadog/datadog/2.4.200/values.yaml b/charts/datadog/datadog/2.4.200/values.yaml new file mode 100644 index 000000000..8ab4ac1be --- /dev/null +++ b/charts/datadog/datadog/2.4.200/values.yaml @@ -0,0 +1,1254 @@ +## Default values for Datadog Agent +## See Datadog helm documentation to learn more: +## https://docs.datadoghq.com/agent/kubernetes/helm/ + +## @param nameOverride - string - optional +## Override name of app. +# +nameOverride: # "" + +## @param fullnameOverride - string - optional +## Override the full qualified app name. +# +fullnameOverride: # "" + +## @param targetSystem - string - required +## Set the target OS for this deployment +## Possible values: linux, windows +# +targetSystem: "linux" + +datadog: + ## @param apiKey - string - required + ## Set this to your Datadog API key before the Agent runs. + ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes + # + apiKey: + + ## @param apiKeyExistingSecret - string - optional + ## Use existing Secret which stores API key instead of creating a new one. + ## If set, this parameter takes precedence over "apiKey". + # + apiKeyExistingSecret: # + + ## @param appKey - string - optional + ## If you are using clusterAgent.metricsProvider.enabled = true, you must set + ## a Datadog application key for read access to your metrics. + # + appKey: # + + ## @param appKeyExistingSecret - string - optional + ## Use existing Secret which stores APP key instead of creating a new one + ## If set, this parameter takes precedence over "appKey". + # + appKeyExistingSecret: # + + ## @param securityContext - object - optional + ## You can modify the security context used to run the containers by + ## modifying the label type below: + # + securityContext: {} + # seLinuxOptions: + # user: "system_u" + # role: "system_r" + # type: "spc_t" + # level: "s0" + + ## @param clusterName - string - optional + ## Set a unique cluster name to allow scoping hosts and Cluster Checks easily + ## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions: + ## * Lowercase letters, numbers, and hyphens only. + ## * Must start with a letter. + ## * Must end with a number or a letter. 
+ ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: + ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name + # + clusterName: # + + ## @param site - string - optional - default: 'datadoghq.com' + ## The site of the Datadog intake to send Agent data to. + ## Set to 'datadoghq.eu' to send data to the EU site. + # + site: # datadoghq.com + + ## @param dd_url - string - optional - default: 'https://app.datadoghq.com' + ## The host of the Datadog intake server to send Agent data to, only set this option + ## if you need the Agent to send data to a custom URL. + ## Overrides the site setting defined in "site". + # + dd_url: # https://app.datadoghq.com + + ## @param logLevel - string - required + ## Set logging verbosity, valid log levels are: + ## trace, debug, info, warn, error, critical, and off + # + logLevel: INFO + + ## @param kubeStateMetricsEnabled - boolean - required + ## If true, deploys the kube-state-metrics deployment. + ## ref: https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics + # + kubeStateMetricsEnabled: true + + ## @param clusterChecks - object - required + ## Enable the Cluster Checks feature on both the cluster-agents and the daemonset + ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ + ## Autodiscovery via Kube Service annotations is automatically enabled + # + clusterChecks: + enabled: false + + ## @param nodeLabelsAsTags - list of key:value strings - optional + ## Provide a mapping of Kubernetes Node Labels to Datadog Tags. + # + nodeLabelsAsTags: {} + # beta.kubernetes.io/instance-type: aws-instance-type + # kubernetes.io/role: kube_role + # : + + ## @param podLabelsAsTags - list of key:value strings - optional + ## Provide a mapping of Kubernetes Labels to Datadog Tags. + # + podLabelsAsTags: {} + # app: kube_app + # release: helm_release + # : + + ## @param podAnnotationsAsTags - list of key:value strings - optional + ## Provide a mapping of Kubernetes Annotations to Datadog Tags + # + podAnnotationsAsTags: {} + # iam.amazonaws.com/role: kube_iamrole + # : + + ## @param tags - list of key:value elements - optional + ## List of tags to attach to every metric, event and service check collected by this Agent. + ## + ## Learn more about tagging: https://docs.datadoghq.com/tagging/ + # + tags: [] + # - ":" + # - ":" + + ## @param dogstatsd - object - required + ## dogstatsd configuration + ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/ + ## To emit custom metrics from your Kubernetes application, use DogStatsD. + # + dogstatsd: + ## @param port - integer - optional - default: 8125 + ## Override the Agent DogStatsD port. + ## Note: Make sure your client is sending to the same UDP port. 
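[editor's note: a minimal, illustrative user-supplied values override exercising the tagging options documented above; the file name, Secret name, cluster name, and tag values are placeholders, not chart defaults. The clusterName shown satisfies the regex enforced in containers-common-env.yaml (lowercase, dot-separated tokens that start with a letter and do not end with a hyphen).]
# my-values.yaml (illustrative)
datadog:
  apiKeyExistingSecret: datadog-secret   # assumed pre-created Secret holding an "api-key" entry
  clusterName: prod-us1.k8s              # passes the clusterName validation in containers-common-env.yaml
  site: datadoghq.eu
  tags:
    - "env:prod"                         # placeholder tags
    - "team:platform"
  nodeLabelsAsTags:
    kubernetes.io/role: kube_role        # mirrors the commented example above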
+ # + port: 8125 + + ## @param originDetection - boolean - optional + ## Enable origin detection for container tagging + ## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging + # + originDetection: false + + ## @param useSocketVolume - boolean - optional + ## Enable dogstatsd over Unix Domain Socket + ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ + # + useSocketVolume: false + + ## @param socketPath - string - optional + ## Path to the DogStatsD socket + # + socketPath: /var/run/datadog/dsd.socket + + ## @param hostSocketPath - string - optional + ## host path to the DogStatsD socket + # + hostSocketPath: /var/run/datadog/ + + ## @param useHostPort - boolean - optional + ## Sets the hostPort to the same value of the container port. Needs to be used + ## for sending custom metrics. + ## The ports need to be available on all hosts. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. + # + useHostPort: false + + ## @param useHostPID - boolean - optional + ## Run the agent in the host's PID namespace. This is required for Dogstatsd origin + ## detection to work. See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/ + # + useHostPID: false + + ## @param nonLocalTraffic - boolean - optional - default: false + ## Enable this to make each node accept non-local statsd traffic. + ## ref: https://github.com/DataDog/docker-dd-agent#environment-variables + # + nonLocalTraffic: false + + ## @param collectEvents - boolean - optional - default: false + ## Enables this to start event collection from the kubernetes API + ## ref: https://docs.datadoghq.com/agent/kubernetes/event_collection/ + # + collectEvents: false + + ## @param leaderElection - boolean - optional - default: false + ## Enables leader election mechanism for event collection. + # + leaderElection: false + + ## @param leaderLeaseDuration - integer - optional - default: 60 + ## Set the lease time for leader election in second. + # + leaderLeaseDuration: # 60 + + ## @param logs - object - required + ## Enable logs agent and provide custom configs + # + logs: + ## @param enabled - boolean - optional - default: false + ## Enables this to activate Datadog Agent log collection. + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + # + enabled: false + + ## @param containerCollectAll - boolean - optional - default: false + ## Enable this to allow log collection for all containers. + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + # + containerCollectAll: false + + ## @param containerUseFiles - boolean - optional - default: true + ## Collect logs from files in /var/log/pods instead of using container runtime API. + ## It's usually the most efficient way of collecting logs. + ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup + # + containerCollectUsingFiles: true + + ## @param apm - object - required + ## Enable apm agent and provide custom configs + # + apm: + ## @param enabled - boolean - optional - default: false + ## Enable this to enable APM and tracing, on port 8126 + ## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host + # + enabled: false + + ## @param port - integer - optional - default: 8126 + ## Override the trace Agent port. + ## Note: Make sure your client is sending to the same UDP port. 
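[editor's note: a hedged sketch of the values that drive the log-collection and DogStatsD socket wiring in container-agent.yaml earlier in this diff (DD_LOGS_ENABLED, DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL, DD_DOGSTATSD_SOCKET and the pointerdir/dsdsocket volumes); the socket paths repeat the chart defaults for readability.]
datadog:
  logs:
    enabled: true                 # sets DD_LOGS_ENABLED and mounts pointerdir/logpodpath
    containerCollectAll: true     # sets DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL
  dogstatsd:
    useSocketVolume: true         # mounts dsdsocket and sets DD_DOGSTATSD_SOCKET
    socketPath: /var/run/datadog/dsd.socket
    hostSocketPath: /var/run/datadog/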
+ # + port: 8126 + + ## @param useSocketVolume - boolean - optional + ## Enable APM over Unix Domain Socket + ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ + # + useSocketVolume: false + + ## @param socketPath - string - optional + ## Path to the trace-agent socket + # + socketPath: /var/run/datadog/apm.socket + + ## @param hostSocketPath - string - optional + ## host path to the trace-agent socket + # + hostSocketPath: /var/run/datadog/ + + ## @param env - list of object - optional + ## The dd-agent supports many environment variables + ## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables + # + env: [] + # - name: + # value: + + ## @param confd - list of objects - optional + ## Provide additional check configurations (static and Autodiscovery) + ## Each key becomes a file in /conf.d + ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes + ## ref: https://docs.datadoghq.com/agent/autodiscovery/ + # + confd: {} + # redisdb.yaml: |- + # init_config: + # instances: + # - host: "name" + # port: "6379" + # kubernetes_state.yaml: |- + # ad_identifiers: + # - kube-state-metrics + # init_config: + # instances: + # - kube_state_url: http://%%host%%:8080/metrics + + ## @param checksd - list of key:value strings - optional + ## Provide additional custom checks as python code + ## Each key becomes a file in /checks.d + ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes + # + checksd: {} + # service.py: |- + + ## @param dockerSocketPath - string - optional + ## Path to the docker socket + # + dockerSocketPath: # /var/run/docker.sock + + ## @param criSocketPath - string - optional + ## Path to the container runtime socket (if different from Docker) + ## This is supported starting from agent 6.6.0 + # + criSocketPath: # /var/run/containerd/containerd.sock + + ## @param processAgent - object - required + ## Enable process agent and provide custom configs + # + processAgent: + ## @param enabled - boolean - required + ## Set this to true to enable live process monitoring agent + ## Note: /etc/passwd is automatically mounted to allow username resolution. 
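[editor's note: a sketch of how the APM options above feed the trace-agent container defined in container-trace-agent.yaml (DD_APM_RECEIVER_PORT, the containerPort/hostPort named "traceport" which that template exposes as TCP, and the optional apmsocket mount); values other than the chart defaults are illustrative.]
datadog:
  apm:
    enabled: true                 # adds the trace-agent container to the DaemonSet
    port: 8126                    # exposed as a TCP containerPort/hostPort named "traceport"
    useSocketVolume: false        # set true to also mount the apm.socket host path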
+ ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset + # + enabled: true + + ## @param processCollection - boolean - required + ## Set this to true to enable process collection in process monitoring agent + ## Requires processAgent.enabled to be set to true to have any effect + # + processCollection: false + + ## @param systemProbe - object - required + ## Enable systemProbe agent and provide custom configs + # + systemProbe: + ## @param enabled - boolean - required + ## Set this to true to enable system-probe agent + # + enabled: false + + ## @param debugPort - integer - required + ## Specify the port to expose pprof and expvar for system-probe agent + # + debugPort: 0 + + ## @param enableConntrack - boolean - required + ## Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data + ## Ref: http://conntrack-tools.netfilter.org/ + # + enableConntrack: true + + ## @param seccomp - string - required + ## Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges + ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash` + # + seccomp: localhost/system-probe + + ## @param seccompRoot - string - required + ## Specify the seccomp profile root directory + # + seccompRoot: /var/lib/kubelet/seccomp + + ## @param bpfDebug - boolean - required + ## Enable logging for kernel debug + # + bpfDebug: false + + ## @param apparmor profile - string - required + ## specify a apparmor profile for system-probe + # + apparmor: unconfined + + ## @param enableTCPQueueLength - boolean - optional + ## Enable the TCP queue length eBPF-based check + # + enableTCPQueueLength: false + + ## @param enableOOMKill - boolean - optional + ## Enable the OOM kill eBPF-based check + # + enableOOMKill: false + + ## @param collectDNSStats - boolean - optional + ## Enable DNS stat collection + # + collectDNSStats: false + + orchestratorExplorer: + ## @param enabled - boolean - required + ## Set this to true to enable the orchestrator explorer. + ## This requires processAgent.enabled and clusterAgent.enabled to be set to true + ## ref: TODO - add doc link + # + enabled: false + +## @param clusterAgent - object - required +## This is the Datadog Cluster Agent implementation that handles cluster-wide +## metrics more cleanly, separates concerns for better rbac, and implements +## the external metrics API so you can autoscale HPAs based on datadog metrics +## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ +# +clusterAgent: + ## @param enabled - boolean - required + ## Set this to true to enable Datadog Cluster Agent + # + enabled: false + + ## @param image - object - required + ## Define the Datadog Cluster-Agent image to work with. + # + image: + ## @param repository - string - required + ## Define the repository to use: + # + repository: datadog/cluster-agent + + ## @param tag - string - required + ## Define the Cluster-Agent version to use. + # + tag: 1.7.0 + + ## @param pullPolicy - string - required + ## The Kubernetes pull policy. 
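[editor's note: a minimal sketch of enabling live process collection and the system-probe using the parameters above; with systemProbe.enabled the chart also renders the system-probe ConfigMap, the seccomp/apparmor pod annotations and the extra container shown earlier in this diff, so this is illustrative rather than a recommended production profile.]
datadog:
  processAgent:
    enabled: true             # runs the process-agent container
    processCollection: true   # additionally sets DD_PROCESS_AGENT_ENABLED=true
  systemProbe:
    enabled: true             # adds the system-probe container plus its ConfigMap and volumes
    enableOOMKill: true       # example eBPF check toggle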
+ # + pullPolicy: IfNotPresent + + ## @param pullSecrets - list of key:value strings - optional + ## It is possible to specify docker registry credentials + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + # + pullSecrets: [] + # - name: "" + + ## @param command - array - optional + ## Command to run in the Cluster Agent container + # + command: [] + + ## @param token - string - required + ## This needs to be at least 32 characters a-zA-z + ## It is a preshared key between the node agents and the cluster agent + ## ref: + # + token: "" + + ## @param replicas - integer - required + ## Specify the of cluster agent replicas, if > 1 it allow the cluster agent to + ## work in HA mode. + # + replicas: 1 + + ## @param rbac - object - required + ## Provide Cluster Agent Deployment pod(s) RBAC configuration + rbac: + ## @param created - boolean - required + ## If true, create & use RBAC resources + # + create: true + + ## @param serviceAccountName - string - required + ## Ignored if clusterAgentrbac.create is true + # + serviceAccountName: default + + ## @param metricsProvider - object - required + ## Enable the metricsProvider to be able to scale based on metrics in Datadog + # + metricsProvider: + ## @param enabled - boolean - required - default: false + ## Set this to true to enable Metrics Provider + # + enabled: false + + ## @param wpaController - boolean - optional + ## Enable informer and controller of the watermark pod autoscaler + ## NOTE: You need to install the `WatermarkPodAutoscaler` CRD before + # + wpaController: false + + ## @param useDatadogMetrics - boolean - optional + ## Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries + ## NOTE: You need to install the `DatadogMetric` CRD before + # + useDatadogMetrics: false + + ## @param createReaderRbac - boolean - optional + ## Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) + # + createReaderRbac: true + + ## Configuration for the service for the cluster-agent metrics server + # + service: + ## @param type - string - optional + ## + # + type: ClusterIP + ## @param port - int - optional + ## + port: 8443 + + ## @param env - list of object - optional + ## The Cluster-Agent supports many additional environment variables that can + ## be passed literally. + ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options + # + env: [] + + ## @param admissionController - object - required + ## Enable the admissionController to be able to inject APM/Dogstatsd config + ## and standard tags (env, service, version) automatically into your pods + # + admissionController: + enabled: false + + ## @param mutateUnlabelled - boolean - optional + ## Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"' + # + mutateUnlabelled: false + + ## @param confd - list of objects - optional + ## Provide additional cluster check configurations + ## Each key will become a file in /conf.d + ## ref: https://docs.datadoghq.com/agent/autodiscovery/ + # + confd: {} + # mysql.yaml: |- + # cluster_check: true + # instances: + # - server: '' + # port: 3306 + # user: datadog + # pass: '' + + ## @param resources - object -required + ## Datadog cluster-agent resource requests and limits. 
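[editor's note: an illustrative combination of the Cluster Agent and cluster check parameters above; with this sketch (and no dedicated cluster checks runner) the node agents receive DD_EXTRA_CONFIG_PROVIDERS="clusterchecks endpointschecks" from container-agent.yaml. The token and the MySQL endpoint are placeholders.]
datadog:
  clusterChecks:
    enabled: true
clusterAgent:
  enabled: true
  token: "abcdefghijklmnopqrstuvwxyzabcdef"   # placeholder; must be at least 32 characters
  confd:
    mysql.yaml: |-
      cluster_check: true
      instances:
        - server: mysql.example.internal      # placeholder endpoint
          port: 3306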
+ # + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi + + ## @param priorityclassName - string - optional + ## Name of the priorityClass to apply to the Cluster Agent + # + priorityClassName: # system-cluster-critical + + ## @param nodeSelector - object - optional + ## Allow the Cluster Agent Deployment to schedule on selected nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## @param affinity - object - optional + ## Allow the Cluster Agent Deployment to schedule using affinity rules + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # + affinity: {} + + ## @param healthPort - integer - optional - default: 5555 + ## Port number use the cluster-agent to server healthz endpoint + healthPort: 5555 + + ## @param livenessProbe - object - required + ## Override the agent's liveness probe logic from the default: + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + livenessProbe: + httpGet: + port: 5555 + path: /live + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + ## @param readinessProbe - object - required + ## Override the cluster-agent's readiness probe logic from the default: + # + readinessProbe: + httpGet: + port: 5555 + path: /ready + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + ## @param strategy - string - required + ## Allow the Cluster Agent deployment to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + + ## @param podAnnotations - list of key:value strings - optional + ## Annotations to add to the cluster-agents's pod(s) + # + podAnnotations: {} + # key: "value" + + ## @param useHostNetwork - boolean - optional + ## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might + ## not be supported. The ports need to be available on all hosts. It can be + ## used for custom metrics instead of a service endpoint. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. + # + useHostNetwork: # true + + ## @param dnsConfig - list of objects - optional + ## specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + ## @param volumes - list of objects - optional + ## Specify additional volumes to mount in the cluster-agent container + # + volumes: [] + # - hostPath: + # path: + # name: + + ## @param volumeMounts - list of objects - optional + ## Specify additional volumes to mount in the cluster-agent container + # + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + + ## @param datadog-cluster.yaml - object - optional + ## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml). 
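[editor's note: a small hedged example for the scheduling-related Cluster Agent knobs above; the priorityClassName value is the one suggested in the comment, and the node label is an assumption about the target cluster.]
clusterAgent:
  priorityClassName: system-cluster-critical
  nodeSelector:
    kubernetes.io/os: linux        # assumed node label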
+ # + datadog_cluster_yaml: {} + + ## @param createPodDisruptionBudget - boolean - optional + ## Specify the pod disruption budget to apply to the cluster agents + # + createPodDisruptionBudget: false + +agents: + ## @param enabled - boolean - required + ## You should keep Datadog DaemonSet enabled! + ## The exceptional case could be a situation when you need to run + ## single Datadog pod per every namespace, but you do not need to + ## re-create a DaemonSet for every non-default namespace install. + ## Note: StatsD and DogStatsD work over UDP, so you may not + ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! + # + enabled: true + + ## @param image - object - required + ## Define the Datadog image to work with. + # + image: + ## @param repository - string - required + ## Define the repository to use: + ## use "datadog/agent" for Datadog Agent 7 + ## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7 + # + repository: datadog/agent + + ## @param tag - string - required + ## Define the Agent version to use. + ## Use 7-jmx to enable jmx fetch collection + # + tag: 7.21.1 + + ## @param doNotCheckTag - boolean - optional + ## By default, the version passed in agents.image.tag is checked + ## for compatibility with the version of the chart. + ## This boolean permits to completely skip this check. + ## This is useful, for example, for custom tags that are not + ## respecting semantic versioning + # + doNotCheckTag: # false + + ## @param pullPolicy - string - required + ## The Kubernetes pull policy. + # + pullPolicy: IfNotPresent + + ## @param pullSecrets - list of key:value strings - optional + ## It is possible to specify docker registry credentials + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + # + pullSecrets: [] + # - name: "" + + ## @param rbac - object - required + ## Provide Daemonset RBAC configuration + rbac: + + ## @param created - boolean - required + ## If true, create & use RBAC resources + # + create: true + + ## @param serviceAccountName - string - required + ## Ignored if daemonset.rbac.create is true + # + serviceAccountName: default + + ## @param podSecurity - object - optional + ## Provide Daemonset PodSecurityPolicy configuration + podSecurity: + + ## @param podSecurityPolicy - object - required + ## Provide Daemonset PodSecurityPolicy configuration + podSecurityPolicy: + + ## @param created - boolean - optional + ## If true, create a PodSecurityPolicy resource for Agent pods + # + create: false + + ## @param securityContextConstraints - object - required + ## Provide Daemonset securityContextConstraints configuration + securityContextConstraints: + + ## @param created - boolean - optional + ## If true, create a SecurityContextConstraints resource for Agent pods + # + create: false + + ## @param securityContext - object - required + ## Provide securityContext configuration + # + securityContext: + rule: MustRunAs + seLinuxOptions: + user: system_u + role: system_r + type: spc_t + level: s0 + + ## @param privileged - boolean - optional + ## If true, Allow to run privileged containers + # + privileged: false + + ## @param capabilites - list - optional + ## Allowed capabilites + # + capabilites: + - SYS_ADMIN + - SYS_RESOURCE + - SYS_PTRACE + - NET_ADMIN + - NET_BROADCAST + - IPC_LOCK + + ## @param volumes - list - optional + ## Allowed volumes types + # + volumes: + - configMap + - downwardAPI + - emptyDir + - hostPath + - secret + + ## @param seccompProfiles - list - optional + ## 
Allowed seccomp profiles + # + seccompProfiles: + - "runtime/default" + - "localhost/system-probe" + + ## @param apparmorProfiles - list - optional + ## Allowed apparmor profiles + # + apparmorProfiles: + - "runtime/default" + + containers: + agent: + ## @param env - list - required + ## Additional environment variables for the agent container. + # + env: [] + + ## @param logLevel - string - optional + ## Set logging verbosity, valid log levels are: + ## trace, debug, info, warn, error, critical, and off. + ## If not set, fall back to the value of datadog.logLevel. + # + logLevel: # INFO + + ## @param resources - object - required + ## Resource requests and limits for the agent container. + # + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi + + ## @param livenessProbe - object - required + ## Override the agent's liveness probe logic from the default: + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + livenessProbe: + httpGet: + path: /live + port: 5555 + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + ## @param readinessProbe - object - required + ## Override the agent's readiness probe logic from the default: + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + readinessProbe: + httpGet: + path: /ready + port: 5555 + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + processAgent: + ## @param env - list - required + ## Additional environment variables for the process-agent container. + # + env: [] + + ## @param logLevel - string - optional + ## Set logging verbosity, valid log levels are: + ## trace, debug, info, warn, error, critical, and off. + ## If not set, fall back to the value of datadog.logLevel. + # + logLevel: # INFO + + ## @param resources - object - required + ## Resource requests and limits for the process-agent container. + # + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + traceAgent: + ## @param env - list - required + ## Additional environment variables for the trace-agent container. + # + env: + + ## @param logLevel - string - optional + ## Set logging verbosity, valid log levels are: + ## trace, debug, info, warn, error, critical, and off. + ## If not set, fall back to the value of datadog.logLevel. + # + logLevel: # INFO + + ## @param resources - object - required + ## Resource requests and limits for the trace-agent container. + # + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + ## @param livenessProbe - object - optional + ## Override the trace agent's liveness probe logic from the default: + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + livenessProbe: + tcpSocket: + port: 8126 + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + + systemProbe: + ## @param env - list - required + ## Additional environment variables for the system-probe container. + # + env: [] + + ## @param logLevel - string - optional + ## Set logging verbosity, valid log levels are: + ## trace, debug, info, warn, error, critical, and off. + ## If not set, fall back to the value of datadog.logLevel. 
+ # + logLevel: # INFO + + ## @param resources - object - required + ## Resource requests and limits for the system-probe container. + # + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + initContainers: + ## @param resources - object - required + ## Resource requests and limits for the init containers. + # + resources: {} + # requests: + # cpu: 100m + # memory: 200Mi + # limits: + # cpu: 100m + # memory: 200Mi + + ## @param volumes - list of objects - optional + ## Specify additional volumes to mount in the dd-agent container + # + volumes: [] + # - hostPath: + # path: + # name: + + ## @param volumeMounts - list of objects - optional + ## Specify additional volumes to mount in the dd-agent container + # + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + + ## @param useHostNetwork - boolean - optional + ## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might + ## not be supported. The ports need to be available on all hosts. It Can be + ## used for custom metrics instead of a service endpoint. + ## + ## WARNING: Make sure that hosts using this are properly firewalled otherwise + ## metrics and traces are accepted from any host able to connect to this host. + # + useHostNetwork: false + + ## @param dnsConfig - list of objects - optional + ## specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + ## @param podAnnotations - list of key:value strings - optional + ## Annotations to add to the DaemonSet's Pods + # + podAnnotations: {} + # : '[{"key": "", "value": ""}]' + + ## @param tolerations - array - optional + ## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) + # + tolerations: [] + + ## @param nodeSelector - object - optional + ## Allow the DaemonSet to schedule on selected nodes + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## @param affinity - object - optional + ## Allow the DaemonSet to schedule using affinity rules + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # + affinity: {} + + ## @param updateStrategy - string - optional + ## Allow the DaemonSet to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + # + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: "10%" + + ## @param priorityClassName - string - optional + ## Sets PriorityClassName if defined. + # + priorityClassName: + + ## @param podLabels - object - optional + ## Sets podLabels if defined. + # + podLabels: {} + + ## @param useConfigMap - boolean - optional + ## Configures a configmap to provide the agent configuration + ## Use this in combination with the `agent.customAgentConfig` parameter. + # + useConfigMap: # false + + ## @param customAgentConfig - object - optional + ## Specify custom contents for the datadog agent config (datadog.yaml). + ## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 + ## ref: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml + ## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account. 
+ # + customAgentConfig: {} + # # Autodiscovery for Kubernetes + # listeners: + # - name: kubelet + # config_providers: + # - name: kubelet + # polling: true + # # needed to support legacy docker label config templates + # - name: docker + # polling: true + # + # # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration + # apm_config: + # enabled: false + # apm_non_local_traffic: true + # + # # Enable java cgroup handling. Only one of those options should be enabled, + # # depending on the agent version you are using along that chart. + # + # # agent version < 6.15 + # # jmx_use_cgroup_memory_limit: true + # + # # agent version >= 6.15 + # # jmx_use_container_support: true + +clusterChecksRunner: + ## @param enabled - boolean - required + ## If true, deploys agent dedicated for running the Cluster Checks instead of running in the Daemonset's agents. + ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ + # + enabled: false + + ## @param image - object - required + ## Define the Datadog image to work with. + # + image: + + ## @param repository - string - required + ## Define the repository to use: + ## use "datadog/agent" for Datadog Agent 7 + ## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7 + # + repository: datadog/agent + + ## @param tag - string - required + ## Define the Agent version to use. + ## Use 7-jmx to enable jmx fetch collection + # + tag: 7.21.1 + + ## @param pullPolicy - string - required + ## The Kubernetes pull policy. + # + pullPolicy: IfNotPresent + + ## @param pullSecrets - list of key:value strings - optional + ## It is possible to specify docker registry credentials + ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + # + pullSecrets: [] + # - name: "" + + ## @param createPodDisruptionBudget - boolean - optional + ## Specify the pod disruption budget to apply to the cluster checks agents + # + createPodDisruptionBudget: false + + ## @param rbac - object - required + ## Provide Cluster Checks Deployment pods RBAC configuration + rbac: + ## @param created - boolean - required + ## If true, create & use RBAC resources + # + create: true + + ## @param dedicated - boolean - required + ## If true, use a dedicated RBAC resource for the cluster checks agent(s) + # + dedicated: false + + ## @param serviceAccountAnnotations - object - required + ## Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true + # + serviceAccountAnnotations: {} + + ## @param serviceAccountName - string - required + ## Ignored if clusterChecksRunner.rbac.create is true + # + serviceAccountName: default + + ## @param replicas - integer - required + ## If you want to deploy the clusterChecks agent in HA, keep at least clusterChecksRunner.replicas set to 2. + ## And increase the clusterChecksRunner.replicas according to the number of Cluster Checks. + # + replicas: 2 + + ## @param resources - object -required + ## Datadog clusterchecks-agent resource requests and limits. + # + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + # limits: + # cpu: 200m + # memory: 500Mi + + ## @param affinity - object - optional + ## Allow the ClusterChecks Deployment to schedule using affinity rules. + ## By default, ClusterChecks Deployment Pods are forced to run on different Nodes. 
+ ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # + affinity: {} + + ## @param strategy - string - optional + ## Allow the ClusterChecks deployment to perform a rolling update on helm update + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + # + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + + ## @param dnsConfig - list of objects - optional + ## specify dns configuration options for datadog cluster agent containers e.g ndots + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + dnsConfig: {} + # options: + # - name: ndots + # value: "1" + + ## @param nodeSelector - object - optional + ## Allow the ClusterChecks Deployment to schedule on selected nodes + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## @param tolerations - array - required + ## Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # + tolerations: [] + + ## @param livenessProbe - object - required + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + # livenessProbe: + # exec: + # command: ["/bin/true"] + # + livenessProbe: + httpGet: + path: /live + port: 5555 + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + ## @param readinessProbe - object - required + ## In case of issues with the probe, you can disable it with the + ## following values, to allow easier investigating: + # + # readinessProbe: + # exec: + # command: ["/bin/true"] + # + readinessProbe: + httpGet: + path: /ready + port: 5555 + initialDelaySeconds: 15 + periodSeconds: 15 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + + ## @param env - list of object - optional + ## The dd-agent supports many environment variables + ## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#environment-variables + # + env: [] + # - name: + # value: + + ## @param volumes - list of objects - optional + ## Specify additional volumes to mount in the cluster checks container + # + volumes: [] + # - hostPath: + # path: + # name: + + ## @param volumeMounts - list of objects - optional + ## Specify additional volumes to mount in the cluster checks container + # + volumeMounts: [] + # - name: + # mountPath: + # readOnly: true + +kube-state-metrics: + rbac: + ## @param created - boolean - required + ## If true, create & use RBAC resources + # + create: true + + serviceAccount: + ## @param created - boolean - required + ## If true, create ServiceAccount, require rbac kube-state-metrics.rbac.create true + # + create: true + ## @param name - string - required + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + # + name: + + ## @param resources - object - optional + ## Resource requests and limits for the kube-state-metrics container. 
+ # + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 200m + # memory: 256Mi diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/.helmignore b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/Chart.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/Chart.yaml new file mode 100644 index 000000000..5cb97fa16 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: dynatrace-oneagent-operator +apiVersion: v2 +appVersion: 0.8.0 +description: The Dynatrace OneAgent Operator Helm chart for Kubernetes and Openshift +home: https://www.dynatrace.com/ +icon: https://assets.dynatrace.com/global/resources/Signet_Logo_RGB_CP_512x512px.png +maintainers: +- email: marco.mader@dynatrace.com + name: DTMad +- email: luis.garcia@dynatrace.com + name: lrgar +- email: michael.mayr@dynatrace.com + name: mmayr-at +name: dynatrace-oneagent-operator +sources: +- https://github.com/Dynatrace/helm-charts +type: application +version: 0.8.000 diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/README.md b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/README.md new file mode 100644 index 000000000..1f45987fb --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/README.md @@ -0,0 +1,86 @@ +# Welcome + +Dynatrace automatically discovers, baselines, and intelligently monitors Kubernetes clusters and workloads. Learn more about Dynatrace at [our website](https://www.dynatrace.com/platform/). + +# Dynatrace OneAgent Operator Helm Chart + +The Dynatrace OneAgent Operator Helm Chart which supports the rollout and lifecycle of [Dynatrace OneAgent](https://www.dynatrace.com/support/help/get-started/introduction/what-is-oneagent/) in Kubernetes and OpenShift clusters. + +This Helm Chart requires Helm 3. + +### Platforms +Depending of the version of the Dynatrace OneAgent Operator, it supports the following platforms: + +| Dynatrace OneAgent Operator Helm Chart version | Kubernetes | OpenShift Container Platform | +| ---------------------------------------------- | ---------- | ---------------------------- | +| v0.8.0 | 1.14+ | 3.11+ | +| v0.7.1 | 1.14+ | 3.11+ | +| v0.6.0 | 1.11+ | 3.11+ | +| v0.5.4 | 1.11+ | 3.11+ | + + +## Quick Start + +The Dynatrace OneAgent Operator acts on its separate namespace `dynatrace`. +It holds the operator deployment and all dependent objects like permissions, custom resources and +corresponding DaemonSets. 
+To install the Dynatrace OneAgent Operator via Helm run the following command: + +### Adding Dynatrace OneAgent Helm repository +``` +$ helm repo add dynatrace https://raw.githubusercontent.com/Dynatrace/helm-charts/master/repos/stable +``` + +### Prepare tokens + +Generate an API and a PaaS token in your Dynatrace environment. + +https://www.dynatrace.com/support/help/reference/dynatrace-concepts/why-do-i-need-an-environment-id/#create-user-generated-access-tokens + +To install the Dynatrace OneAgent Operator replace the APIUrl, the API token and the PaaS token in command and execute it + +#### Kubernetes +``` +$ kubectl create namespace dynatrace +$ helm install dynatrace-oneagent-operator dynatrace/dynatrace-oneagent-operator -n dynatrace --set platform="kubernetes",oneagent.apiUrl="https://ENVIRONMENTID.live.dynatrace.com/api",secret.apiToken="DYNATRACE_API_TOKEN",secret.paasToken="PLATFORM_AS_A_SERVICE_TOKEN" +``` + +#### OpenShift +``` +$ oc adm new-project --node-selector="" dynatrace +$ helm install dynatrace-oneagent-operator dynatrace/dynatrace-oneagent-operator -n dynatrace --set platform="openshift",oneagent.apiUrl="https://ENVIRONMENTID.live.dynatrace.com/api",secret.apiToken="DYNATRACE_API_TOKEN",secret.paasToken="PLATFORM_AS_A_SERVICE_TOKEN" +``` + +This will automatically install the Dynatrace OneAgent Operator and create OneAgents for every of your nodes. + +## Update procedure + +To update simply update your helm repositories and check the latest version + +``` +$ helm repo update +``` + +You can then check for the latest version by searching your Helm repositories for the Dynatrace OneAgent Operator + +``` +$ helm search repo dynatrace-oneagent-operator +``` + +To then update to the latest version run this command and do not forget to add the `reuse-values` flag to keep your configuration + +``` +$ helm upgrade dynatrace-oneagent-operator dynatrace/dynatrace-oneagent-operator -n dynatrace --reuse-values +``` + +## Uninstall dynatrace-oneagent-operator +Remove OneAgent custom resources and clean-up all remaining OneAgent Operator specific objects: + + +```sh +$ helm uninstall dynatrace-oneagent-operator -n dynatrace +``` + +## License + +Dynatrace OneAgent Operator Helm Chart is under Apache 2.0 license. See [LICENSE](../LICENSE) for details. diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/app-readme.md b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/app-readme.md new file mode 100644 index 000000000..9376a87ed --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/app-readme.md @@ -0,0 +1,6 @@ +# Dynatrace OneAgent Operator + +This is the home of the Dynatrace OneAgent Operator's Helm Chart which supports the rollout and lifecycle of [Dynatrace OneAgent](https://www.dynatrace.com/support/help/get-started/introduction/what-is-oneagent/) in Kubernetes and OpenShift clusters. +Rolling out Dynatrace OneAgent via DaemonSet on a cluster is straightforward. +Maintaining its lifecycle places a burden on the operational team. +Dynatrace OneAgent Operator closes this gap by automating the repetitive steps involved in keeping Dynatrace OneAgent at its latest desired version. 
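For reference, the configuration passed via the repeated `--set` flags in the Quick Start commands above can also be kept in a values file. The snippet below is a minimal sketch and not part of this chart's packaged files; it assumes only the keys already used in those commands (`platform`, `oneagent.apiUrl`, `secret.apiToken`, `secret.paasToken`), with placeholder credentials that must be replaced.

```
# values.yaml - minimal sketch for installing the operator from a values file
# instead of repeated --set flags; replace the placeholders before installing.
platform: kubernetes                                       # or "openshift"
oneagent:
  apiUrl: "https://ENVIRONMENTID.live.dynatrace.com/api"   # Dynatrace API URL including /api
secret:
  apiToken: "DYNATRACE_API_TOKEN"                          # API token generated in your environment
  paasToken: "PLATFORM_AS_A_SERVICE_TOKEN"                 # PaaS token generated in your environment
```

With such a file, the install command above reduces to `helm install dynatrace-oneagent-operator dynatrace/dynatrace-oneagent-operator -n dynatrace -f values.yaml`.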
diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagentapms_crd.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagentapms_crd.yaml new file mode 100644 index 000000000..a3fefe9ef --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagentapms_crd.yaml @@ -0,0 +1,145 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: oneagentapms.dynatrace.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.apiUrl + name: ApiUrl + type: string + - JSONPath: .spec.tokens + name: Tokens + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: dynatrace.com + names: + categories: + - dynatrace + kind: OneAgentAPM + listKind: OneAgentAPMList + plural: oneagentapms + singular: oneagentapm + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OneAgentAPM configures the Dynatrace OneAgent for application monitoring + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OneAgentAPMSpec defines the desired state of OneAgentAPM + properties: + apiUrl: + description: Location of the Dynatrace API to connect to, including + your specific environment ID + type: string + enableIstio: + description: If enabled, Istio on the cluster will be configured automatically + to allow access to the Dynatrace environment + type: boolean + networkZone: + description: 'Optional: Adds the OneAgent to the given NetworkZone' + type: string + proxy: + description: 'Optional: Set custom proxy settings either directly or + from a secret with the field ''proxy''' + properties: + value: + type: string + valueFrom: + type: string + type: object + skipCertCheck: + description: Disable certificate validation checks for installer download + and API communication + type: boolean + tokens: + description: Credentials for the OneAgent to connect back to Dynatrace. + type: string + trustedCAs: + description: 'Optional: Adds custom RootCAs from a configmap' + type: string + required: + - apiUrl + type: object + status: + description: OneAgentAPMStatus defines the observed state of OneAgentAPM + properties: + conditions: + description: Conditions includes status about the current state of the + instance + items: + description: "Condition represents an observation of an object's state. + Conditions are an extension mechanism intended to be used when the + details of an observation are not a priori known or would not apply + to all instances of a given Kind. \n Conditions should be added + to explicitly convey properties that users and components care about + rather than requiring those properties to be inferred from other + observations. 
Once defined, the meaning of a Condition can not be + changed arbitrarily - it becomes part of the API, and has the same + backwards- and forwards-compatibility concerns of any other part + of the API." + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is intended to be a one-word, CamelCase + representation of the category of cause of the current status. + It is intended to be used in concise output, such as one-line + kubectl get output, and in summarizing occurrences of causes. + type: string + status: + type: string + type: + description: "ConditionType is the type of the condition and is + typically a CamelCased word or short phrase. \n Condition types + should indicate state in the \"abnormal-true\" polarity. For + example, if the condition indicates when a policy is invalid, + the \"is valid\" case is probably the norm, so the condition + should be called \"Invalid\"." + type: string + required: + - status + - type + type: object + type: array + lastAPITokenProbeTimestamp: + description: LastAPITokenProbeTimestamp tracks when the last request + for the API token validity was sent + format: date-time + type: string + lastPaaSTokenProbeTimestamp: + description: LastPaaSTokenProbeTimestamp tracks when the last request + for the PaaS token validity was sent + format: date-time + type: string + updatedTimestamp: + description: UpdatedTimestamp indicates when the instance was last updated + format: date-time + type: string + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagents_crd.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagents_crd.yaml new file mode 100644 index 000000000..f2a47603d --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/crds/dynatrace.com_oneagents_crd.yaml @@ -0,0 +1,368 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: oneagents.dynatrace.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.apiUrl + name: ApiUrl + type: string + - JSONPath: .spec.tokens + name: Tokens + type: string + - JSONPath: .status.version + name: Version + type: string + - JSONPath: .status.phase + name: Phase + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: dynatrace.com + names: + categories: + - dynatrace + kind: OneAgent + listKind: OneAgentList + plural: oneagents + singular: oneagent + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OneAgent configures the Dynatrace OneAgent for full-stack monitoring + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OneAgentSpec defines the desired state of OneAgent + properties: + apiUrl: + description: Location of the Dynatrace API to connect to, including + your specific environment ID + type: string + args: + description: 'Optional: Arguments to the OneAgent installer' + items: + type: string + type: array + disableAgentUpdate: + description: Disable automatic restarts of OneAgent pods in case a new + version is available + type: boolean + dnsPolicy: + description: 'Optional: Sets DNS Policy for the OneAgent pods' + type: string + enableIstio: + description: If enabled, Istio on the cluster will be configured automatically + to allow access to the Dynatrace environment + type: boolean + env: + description: 'Optional: List of environment variables to set for the + installer' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources + limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional + for env vars' + type: string + divisor: + description: Specifies the output format of the exposed + resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + description: 'Optional: the Dynatrace installer container image Defaults + to docker.io/dynatrace/oneagent:latest for Kubernetes and to registry.connect.redhat.com/dynatrace/oneagent + for OpenShift' + type: string + labels: + additionalProperties: + type: string + description: 'Optional: Adds additional labels for the OneAgent pods' + type: object + networkZone: + description: 'Optional: Adds the OneAgent to the given NetworkZone' + type: string + nodeSelector: + additionalProperties: + type: string + description: Node selector to control the selection of nodes (optional) + type: object + priorityClassName: + description: 'Optional: If specified, indicates the pod''s priority. + Name must be defined by creating a PriorityClass object with that + name. If not specified the setting will be removed from the DaemonSet.' + type: string + proxy: + description: 'Optional: Set custom proxy settings either directly or + from a secret with the field ''proxy''' + properties: + value: + type: string + valueFrom: + type: string + type: object + resources: + description: 'Optional: define resources requests and limits for single + pods' + properties: + limits: + additionalProperties: + type: string + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + type: string + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceAccountName: + description: 'Optional: set custom Service Account Name used with OneAgent + pods' + type: string + skipCertCheck: + description: Disable certificate validation checks for installer download + and API communication + type: boolean + tokens: + description: Credentials for the OneAgent to connect back to Dynatrace. + type: string + tolerations: + description: 'Optional: set tolerations for the OneAgent pods' + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + trustedCAs: + description: 'Optional: Adds custom RootCAs from a configmap' + type: string + waitReadySeconds: + description: 'Optional: Defines the time to wait until OneAgent pod + is ready after update - default 300 sec' + minimum: 0 + type: integer + required: + - apiUrl + type: object + status: + description: OneAgentStatus defines the observed state of OneAgent + properties: + conditions: + description: Conditions includes status about the current state of the + instance + items: + description: "Condition represents an observation of an object's state. + Conditions are an extension mechanism intended to be used when the + details of an observation are not a priori known or would not apply + to all instances of a given Kind. \n Conditions should be added + to explicitly convey properties that users and components care about + rather than requiring those properties to be inferred from other + observations. Once defined, the meaning of a Condition can not be + changed arbitrarily - it becomes part of the API, and has the same + backwards- and forwards-compatibility concerns of any other part + of the API." + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is intended to be a one-word, CamelCase + representation of the category of cause of the current status. + It is intended to be used in concise output, such as one-line + kubectl get output, and in summarizing occurrences of causes. + type: string + status: + type: string + type: + description: "ConditionType is the type of the condition and is + typically a CamelCased word or short phrase. \n Condition types + should indicate state in the \"abnormal-true\" polarity. For + example, if the condition indicates when a policy is invalid, + the \"is valid\" case is probably the norm, so the condition + should be called \"Invalid\"." + type: string + required: + - status + - type + type: object + type: array + instances: + additionalProperties: + properties: + ipAddress: + type: string + podName: + type: string + version: + type: string + type: object + type: object + lastAPITokenProbeTimestamp: + description: LastAPITokenProbeTimestamp tracks when the last request + for the API token validity was sent + format: date-time + type: string + lastPaaSTokenProbeTimestamp: + description: LastPaaSTokenProbeTimestamp tracks when the last request + for the PaaS token validity was sent + format: date-time + type: string + phase: + description: Defines the current state (Running, Updating, Error, ...) + type: string + updatedTimestamp: + description: UpdatedTimestamp indicates when the instance was last updated + format: date-time + type: string + version: + description: Dynatrace version being used. 
+ type: string + type: object + required: + - spec + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/logo.png b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/logo.png new file mode 100644 index 000000000..6714eb8a5 Binary files /dev/null and b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/logo.png differ diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/overview.svg b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/overview.svg new file mode 100644 index 000000000..5e1092df4 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/overview.svg @@ -0,0 +1,2 @@ + +
[overview.svg diagram text: the Dynatrace OneAgent Operator watches changes on the OneAgent CustomResource, queries the version from the Dynatrace Cluster, triggers rollout via the DaemonSet, and triggers updates of the OneAgent Pods]
\ No newline at end of file diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/questions.yml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/questions.yml new file mode 100644 index 000000000..561642772 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/questions.yml @@ -0,0 +1,175 @@ +questions: + +#################### Agent Configuration (REQUIRED) #################### +- variable: mode + label: "Monitoring mode" + description: "Either fullstack for full monitoring or apm for application only monitoring" + default: "fullstack" + type: enum + group: "Agent Configuration (REQUIRED)" + options: + - "fullstack" + - "apm" + +- variable: oneagent.apiUrl + label: "Dynatrace API URL" + description: "Dynatrace API URL including `/api` path at the end" + default: "https://ENVIRONMENTID.live.dynatrace.com/api" + type: string + required: true + group: "Agent Configuration (REQUIRED)" + +- variable: secret.apiToken + label: "Dynatrace API token" + description: "Your Dynatrace API token - You can generate this token in your Dynatrace environment" + default: "" + type: string + required: true + group: "Agent Configuration (REQUIRED)" + +- variable: secret.paasToken + label: "Dynatrace PaaS token" + description: "Your Dynatrace Platform as a Service token - You can generate this token in your Dynatrace environment" + default: "" + type: string + required: true + group: "Agent Configuration (REQUIRED)" + +#################### Advanced Agent Configuration (OPTIONAL) #################### +- variable: show_advanced_config + label: "Show advanced configuration" + description: "Show advanced configuration options for the Dynatrace OneAgent Operator" + default: false + type: boolean + group: "Advanced Agent Configuration (OPTIONAL)" + +- variable: operator.image + label: "Custom Operator image location" + description: "The location from where to grab the Dynatrace OneAgent operator image - default is quay.io/dynatrace/dynatrace-oneagent-operator" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true" + +- variable: oneagent.name + label: "OneAgent CustomResource name" + default: "oneagent" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.disableAgentUpdate + label: "Disable automatic OneAgent updates" + description: "Disables automatic restarts of oneagent pods in case a new version is available" + default: false + type: boolean + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.dnsPolicy + label: "Set custom DNS Policy" + description: "DNS Policy for OneAgent pods. Empty for default (ClusterFirst), more at https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.enableIstio + label: "Enable istio" + description: "When enabled, and if Istio is installed on the Kubernetes environment, then the Operator will create the corresponding VirtualService and ServiceEntries objects to allow access to the Dynatrace cluster from the agent." 
+ default: false + type: boolean + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true" + +- variable: oneagent.image + label: "Custom OneAgent image location" + description: "The location from where to grab the Dynatrace OneAgent image - default for Kubernetes is docker.io/dynatrace/oneagent" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.skipCertCheck + label: "Skip certificate check" + description: "Disable certificate validation checks for installer download and API communication" + default: false + type: boolean + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true" + +- variable: oneagent.priorityClassName + label: "Assign priority class to OneAgent pods" + description: "Priority class to assign to OneAgent pods, more at https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.proxy + label: "Define a proxy" + description: "Configures a proxy for the Agent, AgentDownload and the Operator. Provide the proxy here" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true" + +- variable: oneagent.trustedCAs + label: "Add custom CA certificates" + description: "Adds the provided CA certficates to the Operator and the OneAgent. Provide your custom certificates here. If this is not set the default embedded certificates on the images will be used" + default: "" + type: multiline + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true" + +- variable: oneagent.waitReadySeconds + label: "Wait seconds until ready" + description: "Define the time to wait until OneAgent pod is ready after update - defaults to 300s" + default: "" + type: int + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.args + label: "Arguments to OneAgent installer" + description: "Defines additional arguments which get passed to the OneAgent installer - Please edit as Yaml for the best experience. 
The expected format is YAML and not a string" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.env + label: "Environment variables for OneAgent" + description: "Defines additional environment variables which get passed to the OneAgent - Please edit as Yaml for the best experience" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.nodeSelector + label: "Node selector to control the selection of nodes" + description: "Defines a NodeSelector to customize to which nodes the OneAgent will be rolled out - Please edit as Yaml for the best experience" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.labels + label: "Custom labels for the OneAgent pods" + description: "Defines labels for OneAgent pods to structure workloads as desired - Please edit as Yaml for the best experience" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.resources + label: "Resource definition for the OneAgent pods" + description: "Defines the resources the OneAgent pods can use - Please edit as Yaml for the best experience" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" + +- variable: oneagent.tolerations + label: "Custom tolerations for the OneAgent" + description: "Defines custom tolerations to the OneAgent - Please edit as Yaml for the best experience - see https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" + default: "" + type: string + group: "Advanced Agent Configuration (OPTIONAL)" + show_if: "show_advanced_config=true && mode=fullstack" diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-operator.yaml new file mode 100644 index 000000000..aedff9f83 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-operator.yaml @@ -0,0 +1,47 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . 
| nindent 4 }} +rules: + - apiGroups: + - "" # "" indicates the core API group + resources: + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - apiGroups: + - "" + resources: + - secrets + resourceNames: + - dynatrace-oneagent-config + verbs: + - get + - update + - delete diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-webhook.yaml new file mode 100644 index 000000000..661399f81 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrole-webhook.yaml @@ -0,0 +1,47 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dynatrace-oneagent-webhook + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - list + - create + - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + resourceNames: + - dynatrace-oneagent-webhook + verbs: + - get + - update diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-operator.yaml new file mode 100644 index 000000000..cca2b78f8 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-operator.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ .Release.Name }} + apiGroup: rbac.authorization.k8s.io diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-webhook.yaml new file mode 100644 index 000000000..b2305a81f --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/clusterrolebinding-webhook.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dynatrace-oneagent-webhook + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: dynatrace-oneagent-webhook + apiGroup: rbac.authorization.k8s.io diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/configmap.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/configmap.yaml new file mode 100644 index 000000000..10d35fcf3 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/configmap.yaml @@ -0,0 +1,25 @@ +# Copyright 2020 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if .Values.oneagent.trustedCAs }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.oneagent.name }} + namespace: {{ .Release.Namespace }} +data: + certs: | +{{ .Values.oneagent.trustedCAs | indent 4 }} +{{- end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagent.yaml new file mode 100644 index 000000000..64229cfb0 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagent.yaml @@ -0,0 +1,99 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.mode "fullstack" }} +apiVersion: dynatrace.com/v1alpha1 +kind: OneAgent +metadata: + name: {{ .Values.oneagent.name }} + namespace: {{ .Release.Namespace }} +spec: + apiUrl: {{ required "ApiUrl needs to be set!" .Values.oneagent.apiUrl }} + tokens: {{ .Values.oneagent.name }} + image: {{ include "dynatrace-oneagent.image" . 
}} + + {{- if ne (printf "%T" .Values.oneagent.args) "string" }} + args: {{- toYaml .Values.oneagent.args | nindent 4 }} + {{- else }} + args: + - --set-app-log-content-access=true + {{- end }} + + {{- if .Values.oneagent.env }} + env: {{- toYaml .Values.oneagent.env | nindent 4 }} + {{- end }} + + {{- if .Values.oneagent.labels }} + labels: {{- toYaml .Values.oneagent.labels | nindent 4 }} + {{- end }} + + {{- if .Values.oneagent.nodeSelector }} + nodeSelector: {{- toYaml .Values.oneagent.nodeSelector | nindent 4 }} + {{- end }} + + {{- if .Values.oneagent.proxy }} + proxy: + valueFrom: {{ .Values.oneagent.name }} + {{- end }} + + {{- if ne (printf "%T" .Values.oneagent.tolerations) "string" }} + tolerations: {{- toYaml .Values.oneagent.tolerations | nindent 4 }} + {{- else }} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + {{- end }} + + {{- if .Values.oneagent.resources }} + resources: {{- toYaml .Values.oneagent.resources | nindent 4 }} + {{- end }} + + {{- if .Values.oneagent.dnsPolicy }} + dnsPolicy: {{ .Values.oneagent.dnsPolicy }} + {{- end }} + + {{- if .Values.oneagent.enableIstio }} + enableIstio: {{ .Values.oneagent.enableIstio }} + {{- end }} + + {{- if .Values.oneagent.disableAgentUpdate }} + disableAgentUpdate: {{ .Values.oneagent.disableAgentUpdate }} + {{- end }} + + {{- if .Values.oneagent.skipCertCheck }} + skipCertCheck: {{ .Values.oneagent.skipCertCheck }} + {{- end }} + + {{- if .Values.oneagent.waitReadySeconds }} + waitReadySeconds: {{ .Values.oneagent.waitReadySeconds }} + {{- end }} + + {{- if .Values.oneagent.priorityClassName }} + priorityClassName: {{ .Values.oneagent.priorityClassName }} + {{- end }} + + {{- if .Values.oneagent.serviceAccountName }} + serviceAccountName: {{ .Values.oneagent.serviceAccountName }} + {{- end }} + + {{- if .Values.oneagent.trustedCAs }} + trustedCAs: {{ .Values.oneagent.name }} + {{- end }} + + {{- if .Values.oneagent.networkZone }} + networkZone: {{ .Values.oneagent.networkZone }} + {{- end }} +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagentapm.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagentapm.yaml new file mode 100644 index 000000000..74d88a980 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/customresource-oneagentapm.yaml @@ -0,0 +1,46 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.mode "apm" }} +apiVersion: dynatrace.com/v1alpha1 +kind: OneAgentAPM +metadata: + name: {{ include "oneagentapm.name" . 
}} + namespace: {{ .Release.Namespace }} +spec: + apiUrl: {{ .Values.oneagent.apiUrl }} + tokens: {{ .Values.oneagent.name }} + + {{- if .Values.oneagent.skipCertCheck }} + skipCertCheck: {{ .Values.oneagent.skipCertCheck }} + {{- end }} + + {{- if .Values.oneagent.enableIstio }} + enableIstio: {{ .Values.oneagent.enableIstio }} + {{- end }} + + {{- if .Values.oneagent.proxy }} + proxy: + valueFrom: {{ .Values.oneagent.name }} + {{- end }} + + {{- if .Values.oneagent.trustedCAs }} + trustedCAs: {{ .Values.oneagent.name }} + {{- end }} + + {{- if .Values.oneagent.networkZone }} + networkZone: {{ .Values.oneagent.networkZone }} + {{- end }} +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-operator.yaml new file mode 100644 index 000000000..159ec6410 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-operator.yaml @@ -0,0 +1,86 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . | nindent 4 }} +spec: + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: {{ .Release.Name }} + strategy: + type: Recreate + template: + metadata: + labels: + name: {{ .Release.Name }} + {{- include "dynatrace-oneagent-operator.commonlabels" . | nindent 8 }} + spec: + containers: + - name: {{ .Release.Name }} + args: + - operator + image: {{ include "dynatrace-oneagent-operator.image" . 
}} + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + ports: + - containerPort: 60000 + name: metrics + resources: + requests: + cpu: 10m + memory: 64Mi + limits: + cpu: 100m + memory: 256Mi + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: kubernetes.io/os + operator: In + values: + - linux + serviceAccountName: {{ .Release.Name }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-webhook.yaml new file mode 100644 index 000000000..de3143983 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/deployment-webhook.yaml @@ -0,0 +1,125 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + labels: + dynatrace.com/operator: oneagent +spec: + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + internal.oneagent.dynatrace.com/component: webhook + internal.oneagent.dynatrace.com/app: webhook + strategy: + type: Recreate + template: + metadata: + labels: + dynatrace.com/operator: oneagent + internal.oneagent.dynatrace.com/component: webhook + internal.oneagent.dynatrace.com/app: webhook + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - name: webhook + args: + - webhook-server + image: {{ include "dynatrace-oneagent-operator.image" . 
}} + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + readinessProbe: + httpGet: + path: /healthz + port: server-port + scheme: HTTPS + initialDelaySeconds: 15 + periodSeconds: 10 + ports: + - name: server-port + containerPort: 8443 + resources: + requests: + cpu: 10m + memory: 64Mi + limits: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: certs-volume + mountPath: /mnt/webhook-certs + - name: bootstrapper + args: + - webhook-bootstrapper + image: {{ include "dynatrace-oneagent-operator.image" . }} + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + resources: + requests: + cpu: 10m + memory: 64Mi + limits: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: certs-volume + mountPath: /mnt/webhook-certs + serviceAccountName: dynatrace-oneagent-webhook + volumes: + - name: certs-volume + emptyDir: {} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/mutatingwebhookconfiguration.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/mutatingwebhookconfiguration.yaml new file mode 100644 index 000000000..9248d5217 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/mutatingwebhookconfiguration.yaml @@ -0,0 +1,39 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: dynatrace-oneagent-webhook + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . 
| nindent 4 }} +webhooks: +- name: webhook.oneagent.dynatrace.com + rules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["CREATE"] + resources: ["pods"] + scope: Namespaced + namespaceSelector: + matchExpressions: + - key: oneagent.dynatrace.com/instance + operator: Exists + clientConfig: + service: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + path: /inject + admissionReviewVersions: ["v1beta1"] diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-operator.yaml new file mode 100644 index 000000000..82df3260e --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-operator.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }} +roleRef: + kind: Role + name: {{ .Release.Name }} + apiGroup: rbac.authorization.k8s.io diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-webhook.yaml new file mode 100644 index 000000000..f16fb26b1 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/rolebinding-webhook.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . 
| nindent 4 }} +subjects: + - kind: ServiceAccount + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: dynatrace-oneagent-webhook + apiGroup: rbac.authorization.k8s.io diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/secret.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/secret.yaml new file mode 100644 index 000000000..7449e6b65 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/secret.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if .Values.secret.autoCreate }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.oneagent.name }} + namespace: {{ .Release.Namespace }} +data: + apiToken: {{ .Values.secret.apiToken | b64enc }} + paasToken: {{ .Values.secret.paasToken | b64enc }} + {{- if .Values.oneagent.proxy }} + proxy: {{ .Values.oneagent.proxy | b64enc }} + {{- end }} +type: Opaque +{{- end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/service.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/service.yaml new file mode 100644 index 000000000..cd280bb0f --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/service.yaml @@ -0,0 +1,31 @@ + +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: v1 +kind: Service +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . 
| nindent 4 }} +spec: + selector: + internal.oneagent.dynatrace.com/app: webhook + internal.oneagent.dynatrace.com/component: webhook + ports: + - port: 443 + protocol: TCP + targetPort: server-port diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/serviceaccount-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/serviceaccount-webhook.yaml new file mode 100644 index 000000000..21e17344a --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Common/serviceaccount-webhook.yaml @@ -0,0 +1,20 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-oneagent.yaml new file mode 100644 index 000000000..f17e25282 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-oneagent.yaml @@ -0,0 +1,44 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: dynatrace-oneagent + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: "*" +spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - "*" + volumes: + - "*" + hostNetwork: true + hostIPC: true + hostPID: true + hostPorts: + - min: 0 + max: 65535 + runAsUser: + rule: "RunAsAny" + seLinux: + rule: "RunAsAny" + supplementalGroups: + rule: "RunAsAny" + fsGroup: + rule: "RunAsAny" +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-operator.yaml new file mode 100644 index 000000000..7d792073c --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-operator.yaml @@ -0,0 +1,50 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: dynatrace-oneagent-operator + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: "docker/default" + apparmor.security.beta.kubernetes.io/allowedProfileNames: "runtime/default" + seccomp.security.alpha.kubernetes.io/defaultProfileName: "docker/default" + apparmor.security.beta.kubernetes.io/defaultProfileName: "runtime/default" +spec: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + volumes: + - "configMap" + - "emptyDir" + - "projected" + - "secret" + - "downwardAPI" + - "persistentVolumeClaim" + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: "MustRunAsNonRoot" + seLinux: + rule: "RunAsAny" + supplementalGroups: + rule: "RunAsAny" + fsGroup: + rule: "RunAsAny" +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-webhook.yaml new file mode 100644 index 000000000..a8cb0925f --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/podsecuritypolicy-webhook.yaml @@ -0,0 +1,50 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: dynatrace-oneagent-webhook + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: "docker/default" + apparmor.security.beta.kubernetes.io/allowedProfileNames: "runtime/default" + seccomp.security.alpha.kubernetes.io/defaultProfileName: "docker/default" + apparmor.security.beta.kubernetes.io/defaultProfileName: "runtime/default" +spec: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL + volumes: + - "configMap" + - "emptyDir" + - "projected" + - "secret" + - "downwardAPI" + - "persistentVolumeClaim" + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: "MustRunAsNonRoot" + seLinux: + rule: "RunAsAny" + supplementalGroups: + rule: "RunAsAny" + fsGroup: + rule: "RunAsAny" +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-oneagent.yaml new file mode 100644 index 000000000..f20b85ea8 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-oneagent.yaml @@ -0,0 +1,31 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: dynatrace-oneagent + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - dynatrace-oneagent + verbs: + - use +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-operator.yaml new file mode 100644 index 000000000..2b7a7866b --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-operator.yaml @@ -0,0 +1,124 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . 
| nindent 4 }} +rules: + - apiGroups: + - dynatrace.com + resources: + - oneagents + - oneagentapms + verbs: + - get + - list + - watch + - update + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - "" # "" indicates the core API group + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" # "" indicates the core API group + resources: + - pods + verbs: + - get + - list + - watch + - delete + - apiGroups: + - "" # "" indicates the core API group + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - dynatrace.com + resources: + - oneagents/finalizers + - oneagents/status + - oneagentapms/finalizers + - oneagentapms/status + verbs: + - update + - apiGroups: + - networking.istio.io + resources: + - serviceentries + - virtualservices + verbs: + - get + - list + - create + - update + - delete + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - dynatrace-oneagent-operator + verbs: + - use +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-webhook.yaml new file mode 100644 index 000000000..e3fb9c037 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/role-webhook.yaml @@ -0,0 +1,61 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . 
| nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - services + - configmaps + - secrets + verbs: + - get + - list + - watch + - create + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: + - dynatrace.com + resources: + - oneagentapms + verbs: + - get + - list + - watch + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - dynatrace-oneagent-webhook + verbs: + - use +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/rolebinding-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/rolebinding-oneagent.yaml new file mode 100644 index 000000000..a16c6ce73 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/rolebinding-oneagent.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: dynatrace-oneagent + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: dynatrace-oneagent +subjects: + - kind: ServiceAccount + name: dynatrace-oneagent + namespace: {{ .Release.Namespace }} +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-oneagent.yaml new file mode 100644 index 000000000..4b958ab6f --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-oneagent.yaml @@ -0,0 +1,22 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dynatrace-oneagent + namespace: {{ .Release.Namespace }} +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-operator.yaml new file mode 100644 index 000000000..5477dd8fb --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Kubernetes/serviceaccount-operator.yaml @@ -0,0 +1,22 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "kubernetes" }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/NOTES.txt b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/NOTES.txt new file mode 100644 index 000000000..288d31baa --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/NOTES.txt @@ -0,0 +1,58 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
+ +To find more information about the Dynatrace OneAgent Operator, try: +https://github.com/Dynatrace/dynatrace-oneagent-operator + +To verify the current state of the OneAgent deployment, try: + $ kubectl get pods -n {{ .Release.Namespace }} + $ kubectl logs -f deployment/{{ .Release.Name }} -n {{ .Release.Namespace }} + $ kubectl get oneagent {{ .Values.oneagent.name }} -n {{ .Release.Namespace }} + +{{- if eq .Values.mode "apm" -}} + {{- if .Values.oneagent.image -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.image + {{- end -}} + {{- if .Values.oneagent.args -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.args + {{- end -}} + {{- if .Values.oneagent.env -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.env + {{- end -}} + {{- if .Values.oneagent.nodeSelector -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.nodeSelector + {{- end -}} + {{- if .Values.oneagent.labels -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.labels + {{- end -}} + {{- if .Values.oneagent.disableAgentUpdate -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.disableAgentUpdate + {{- end -}} + {{- if .Values.oneagent.dnsPolicy -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.dnsPolicy + {{- end -}} + {{- if .Values.oneagent.resources -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.resources + {{- end -}} + {{- if .Values.oneagent.tolerations -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.tolerations + {{- end -}} + {{- if .Values.oneagent.waitReadySeconds -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.waitReadySeconds + {{- end -}} + {{- if .Values.oneagent.priorityClassName -}} +WARNING: +The following argument did not get applied since it can only be used with fullstack: oneagent.priorityClassName + {{- end -}} +{{- end -}} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-operator.yaml new file mode 100644 index 000000000..7f02c04d4 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-operator.yaml @@ -0,0 +1,116 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "openshift" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabels" . | nindent 4 }} +rules: + - apiGroups: + - dynatrace.com + resources: + - oneagents + - oneagentapms + verbs: + - get + - list + - watch + - update + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - "" # "" indicates the core API group + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" # "" indicates the core API group + resources: + - pods + verbs: + - get + - list + - watch + - delete + - apiGroups: + - "" # "" indicates the core API group + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - dynatrace.com + resources: + - oneagents/finalizers + - oneagents/status + - oneagentapms/finalizers + - oneagentapms/status + verbs: + - update + - apiGroups: + - networking.istio.io + resources: + - serviceentries + - virtualservices + verbs: + - get + - list + - create + - update + - delete +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-webhook.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-webhook.yaml new file mode 100644 index 000000000..39ce2371b --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/role-webhook.yaml @@ -0,0 +1,53 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "openshift" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: dynatrace-oneagent-webhook + namespace: {{ .Release.Namespace }} + labels: + {{- include "dynatrace-oneagent-operator.commonlabelswebhook" . 
| nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - services + - configmaps + - secrets + verbs: + - get + - list + - watch + - create + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: + - dynatrace.com + resources: + - oneagentapms + verbs: + - get + - list + - watch +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/securitycontextconstraints.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/securitycontextconstraints.yaml new file mode 100644 index 000000000..86a76840d --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/securitycontextconstraints.yaml @@ -0,0 +1,50 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "openshift" }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: "dynatrace-oneagent-privileged allows access to all privileged and host features and the ability to run as any user, any group, any fsGroup, and with any SELinux context. This is a copy of privileged scc." + name: dynatrace-oneagent-privileged +allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegedContainer: true +allowedCapabilities: + - "*" +allowedFlexVolumes: null +defaultAddCapabilities: [] +fsGroup: + type: RunAsAny +priority: 1 +readOnlyRootFilesystem: false +requiredDropCapabilities: [] +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: + - "*" +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:dynatrace:dynatrace-oneagent +volumes: + - "*" +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-oneagent.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-oneagent.yaml new file mode 100644 index 000000000..100b93ecc --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-oneagent.yaml @@ -0,0 +1,25 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "openshift" }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dynatrace-oneagent + namespace: {{ .Release.Namespace }} +imagePullSecrets: + - name: redhat-connect + - name: redhat-connect-sso +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-operator.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-operator.yaml new file mode 100644 index 000000000..d80abab71 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/Openshift/serviceaccount-operator.yaml @@ -0,0 +1,25 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $platformIsSet := printf "%s" (required "Platform needs to be set to kubernetes or openshift" (include "dynatrace-oneagent-operator.platformSet" .))}} +{{- if eq .Values.platform "openshift" }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dynatrace-oneagent-operator + namespace: {{ .Release.Namespace }} +imagePullSecrets: + - name: redhat-connect + - name: redhat-connect-sso +{{ end }} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/_helpers.tpl b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/_helpers.tpl new file mode 100644 index 000000000..1ad8117c3 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/templates/_helpers.tpl @@ -0,0 +1,113 @@ +// Copyright 2019 Dynatrace LLC + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "dynatrace-oneagent-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "dynatrace-oneagent-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "dynatrace-oneagent-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "dynatrace-oneagent-operator.commonlabels" -}} +dynatrace: operator +operator: oneagent +helm.sh/chart: {{ include "dynatrace-oneagent-operator.chart" . }} +{{- end -}} + +{{/* +Common labels webhook +*/}} +{{- define "dynatrace-oneagent-operator.commonlabelswebhook" -}} +dynatrace.com/operator: oneagent +internal.oneagent.dynatrace.com/component: webhook +helm.sh/chart: {{ include "dynatrace-oneagent-operator.chart" . }} +{{- end -}} + +{{/* +Check if platform is set +*/}} +{{- define "dynatrace-oneagent-operator.platformSet" -}} +{{- if or (eq .Values.platform "kubernetes") (eq .Values.platform "openshift") -}} + {{ default "set" }} +{{- end -}} +{{- end -}} + +{{/* +Check if default oneagent image is used +*/}} +{{- define "dynatrace-oneagent.image" -}} +{{- if .Values.oneagent.image -}} + {{- printf "%s" .Values.oneagent.image -}} +{{- else -}} + {{- if eq .Values.platform "kubernetes" -}} + {{- printf "docker.io/dynatrace/oneagent" }} + {{- end -}} + {{- if eq .Values.platform "openshift" -}} + {{- printf "registry.connect.redhat.com/dynatrace/oneagent" }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Check if default operator image is used +*/}} +{{- define "dynatrace-oneagent-operator.image" -}} +{{- if .Values.operator.image -}} + {{- printf "%s" .Values.operator.image -}} +{{- else -}} + {{- printf "%s:v%s" "docker.io/dynatrace/dynatrace-oneagent-operator" .Chart.AppVersion }} +{{- end -}} +{{- end -}} + +{{/* +Check for correct oneagentapm name +*/}} +{{- define "oneagentapm.name" -}} +{{- if eq .Values.mode "apm" }} + {{- if eq .Values.oneagent.name "oneagent" -}} + {{- printf "oneagentapm" -}} + {{- else -}} + {{- printf "%s" .Values.oneagent.name -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/values.yaml b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/values.yaml new file mode 100644 index 000000000..2636c7880 --- /dev/null +++ b/charts/dynatrace-oneagent-operator/dynatrace-oneagent-operator/0.8.000/values.yaml @@ -0,0 +1,51 @@ +# Copyright 2019 Dynatrace LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +platform: "kubernetes" +mode: "fullstack" + +operator: + image: "" + +oneagent: + name: "oneagent" + apiUrl: "" + image: "" + # The expected format is YAML and not a string + args: "" + # The expected format is YAML and not a string + env: "" + # The expected format is YAML and not a string + nodeSelector: "" + # The expected format is YAML and not a string + labels: "" + skipCertCheck: false + disableAgentUpdate: false + enableIstio: false + dnsPolicy: "" + # The expected format is YAML and not a string + resources: "" + # The expected format is YAML and not a string + tolerations: "" + waitReadySeconds: null + priorityClassName: "" + serviceAccountName: "" + proxy: "" + trustedCAs: "" + networkZone: "" + +secret: + autoCreate: true + apiToken: "" + paasToken: "" diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/.helmignore b/charts/falcon-sensor/falcon-sensor/0.9.300/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/Chart.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/Chart.yaml new file mode 100644 index 000000000..f00d7c5e8 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: CrowdStrike Falcon Platform + catalog.cattle.io/release-name: falcon-helm +apiVersion: v2 +appVersion: 0.9.3 +description: A Helm chart to deploy CrowdStrike Falcon sensors into Kubernetes clusters. +home: https://crowdstrike.com +icon: https://raw.githubusercontent.com/CrowdStrike/falcon-helm/main/images/crowdstrike-logo.svg +keywords: +- CrowdStrike +- Falcon +- EDR +- kubernetes +- security +- monitoring +- alerting +maintainers: +- name: CrowdStrike Solution Architecture +- email: gabriel.alford@crowdstrike.com + name: Gabe Alford +name: falcon-sensor +sources: +- https://github.com/CrowdStrike/falcon-helm +type: application +version: 0.9.300 diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/README.md b/charts/falcon-sensor/falcon-sensor/0.9.300/README.md new file mode 100644 index 000000000..b8b5925aa --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/README.md @@ -0,0 +1,90 @@ +# CrowdStrike Falcon Helm Chart + +[Falcon](https://www.crowdstrike.com/) is the [CrowdStrike](https://www.crowdstrike.com/) +platform purpose-built to stop breaches via a unified set of cloud-delivered +technologies that prevent all types of attacks — including malware and much +more. + +# Kubernetes Cluster Compatibility + +The Falcon Helm chart has been tested to deploy on the following Kubernetes distributions: + +* Amazon Elastic Kubernetes Service (EKS) +* Azure Kubernetes Service (AKS) - Linux Nodes Only +* Google Kubernetes Engine (GKE) +* Rancher K3s + * Nodes must be Linux distributions supported by CrowdStrike.
See [https://falcon.crowdstrike.com/support/documentation/20/falcon-sensor-for-linux#operating-systems](https://falcon.crowdstrike.com/support/documentation/20/falcon-sensor-for-linux#operating-systems) for supported Linux distributions and kernels. +* Red Hat OpenShift Container Platform 4.6+ + +# Dependencies + +1. Requires an x86_64 Kubernetes cluster +1. Must be a CrowdStrike customer with access to the Falcon Linux Sensor and Falcon Container downloads. +1. Before deploying the Helm chart, you should have a containerized Falcon Linux Sensor in your container registry. See the Deployment Considerations for more. +1. Helm 3.x is installed and supported by the Kubernetes vendor. + +# Deployment Considerations + +To ensure a successful deployment, you will want to ensure that: +1. By default, the Helm Chart installs in the `default` namespace. Best practice for deploying to Kubernetes is to create a new namespace. This can be done by adding `-n falcon-system --create-namespace` to your `helm install` command. +1. You have access to a containerized Falcon sensor image. This is most likely through a private image registry on your network or cloud provider. See [https://github.com/CrowdStrike/Dockerfiles](https://github.com/CrowdStrike/Dockerfiles) as an example of how to build a Falcon sensor for your registry. +1. The Falcon Linux Sensor (not the Falcon Container) should be used in the container image to deploy to Kubernetes nodes. +1. When deploying the Falcon Linux Sensor to a node, the container image should match the node's operating system. For example, if the node is running Red Hat Enterprise Linux 8.2, the container image should be based on Red Hat Enterprise Linux 8.2, etc. This is important to ensure sensor and image compatibility with the base node operating system. +1. You must have sufficient permissions to deploy Helm Charts to the cluster. This is often received through cluster admin privileges. +1. Only deploying to Kubernetes nodes is supported at this time. +1. When deploying the Falcon Linux Sensor as a container to Kubernetes nodes, it is a requirement that the Falcon Sensor run as a privileged container so that the Sensor can properly work with the kernel. If this is unacceptable, you can install the Falcon Linux Sensor (still runs with privileges) using an RPM or DEB package on the nodes themselves. This assumes that you have the capability to actually install RPM or DEB packages on the nodes. If you do not have this capability and you want to protect the nodes, you have to install using a privileged container. +1. CrowdStrike's Helm Operator is a project, not a product, and is released to the community as a way to automate sensor deployment to Kubernetes clusters. The upstream repository for this project is [https://github.com/CrowdStrike/falcon-helm](https://github.com/CrowdStrike/falcon-helm). + +# Installation + +### Add the CrowdStrike Falcon Helm repository + +``` +helm repo add crowdstrike https://crowdstrike.github.io/falcon-helm +``` + +### Install CrowdStrike Falcon Helm Chart + +``` +helm upgrade --install falcon-helm crowdstrike/falcon-sensor \ + --set falcon.cid="" \ + --set node.image.repository="/falcon-node-sensor" +``` + +The above command will install the CrowdStrike Falcon Helm Chart with the release name `falcon-helm` in the namespace your `kubectl` context is currently set to.
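+
+For example, assuming the default `falcon-helm` release name used above (which should produce a DaemonSet named `falcon-helm-falcon-sensor`), a quick way to verify that the sensor DaemonSet and its pods were created is:
+
+```
+# List the Falcon sensor DaemonSet and the pods created by this release
+kubectl get daemonset,pods -l app=falcon-helm-falcon-sensor
+```
+
+Add `-n <namespace>` to the command above if you installed into a custom namespace.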
+You can also install into a customized namespace by running the following: + +``` +helm upgrade --install falcon-helm crowdstrike/falcon-sensor \ + -n falcon-system --create-namespace \ + --set falcon.cid="" \ + --set node.image.repository="/falcon-node-sensor" +``` + +For more details, please see the [falcon-helm](https://github.com/CrowdStrike/falcon-helm) repository. + +## Node Configuration + +The following table lists the more common configurable parameters of the chart and their default values for installing on a Kubernetes node. + +| Parameter | Description | Default | +|:--------------------------------|:---------------------------------------------------------------------|:----------------------------------------- | +| `node.enabled` | Enable installation on the Kubernetes node | `true` | +| `node.image.repository` | Falcon Sensor Node registry/image name | `falcon-node-sensor` | +| `node.image.tag` | The version of the official image to use | `latest` | +| `node.image.pullPolicy` | Policy for updating images | `Always` | +| `node.image.pullSecrets` | Pull secrets for private registry | `{}` | +| `falcon.cid` | CrowdStrike Customer ID (CID) | None (Required) | + +`falcon.cid` and `node.image.repository` are required values. + +### Uninstall Helm Chart +To uninstall, run the following command: +``` +helm uninstall falcon-helm +``` + +To uninstall from a custom namespace, run the following command: +``` +helm uninstall falcon-helm -n falcon-system +``` diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/app-readme.md b/charts/falcon-sensor/falcon-sensor/0.9.300/app-readme.md new file mode 100644 index 000000000..7f653f423 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/app-readme.md @@ -0,0 +1,9 @@ +# CrowdStrike Falcon + +[CrowdStrike](https://www.crowdstrike.com/) [Container Security](https://www.crowdstrike.com/cloud-security-products/falcon-cloud-workload-protection/) +comes complete with vulnerability management, continuous +threat detection and response, and runtime protection, combined with compliance +enforcement and automated continuous integration/continuous delivery (CI/CD) pipeline security, enabling +DevOps teams to stay secure while building in the cloud. + +For more information, please visit [https://www.crowdstrike.com/cloud-security-products/falcon-cloud-workload-protection/](https://www.crowdstrike.com/cloud-security-products/falcon-cloud-workload-protection/) diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/ci/cid-values.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/ci/cid-values.yaml new file mode 100644 index 000000000..8e93d8809 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/ci/cid-values.yaml @@ -0,0 +1,2 @@ +falcon: + cid: 123456789TESTS-00 diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/questions.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/questions.yaml new file mode 100644 index 000000000..9fa523a3a --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/questions.yaml @@ -0,0 +1,97 @@ +questions: + - variable: node.image.repository + description: "URL of container image repository holding containerized Falcon sensor. Defaults to 'falcon-node-sensor'." + required: true + type: string + default: falcon-node-sensor + label: Container Image Repository + group: "Node Container Images" + + - variable: node.image.tag + description: "Container registry image tag. Defaults to 'latest'."
+ required: true + type: string + default: "latest" + label: Container Image Tag + group: "Node Container Images" + + - variable: falcon.cid + description: "Passed to falconctl as \"--cid=\"{uuid string}\"\"" + required: true + type: string + label: CrowdStrike Customer ID (CID) + group: "Falcon Sensor Node Settings" + + - variable: falcon.apd + description: "App Proxy Disable. Passed to falconctl as \"--apt=true\" or \"--apt=false\"." + required: false + type: boolean + default: false + label: Disable using a proxy + group: "Falcon Sensor Node Settings" + + - variable: falcon.aph + description: "App Proxy Hostname (APH). Uncommon in container-based deployments. Passed to falconctl as \"--aph \"" + required: false + type: string + label: Configure Proxy Host + group: "Falcon Sensor Node Settings" + + - variable: falcon.app + description: "App Proxy Port (APP). Uncommon in container-based deployments. Passed to falconctl as \"--app=\"" + required: false + type: string + label: Configure Proxy Port + group: "Falcon Sensor Node Settings" + + - variable: falcon.trace + description: "Options are [none|err|warn|info|debug]. Passed to falconctl as \"--trace=[none|err|warn|info|debug]\"" + required: false + type: string + label: Set logging trace level + default: "none" + group: "Falcon Sensor Node Settings" + + - variable: falcon.feature + description: "Options to pass to the \"--feature\" flag. Options are [none,[enableLog[,disableLogBuffer[,disableOsfm[,emulateUpdate]]]]]" + required: false + type: string + label: Enable or disable certain sensor features + group: "Falcon Sensor Node Settings" + + - variable: falcon.update + description: "SIGHUP the sensor for immediate trace/feature update." + required: false + type: boolean + default: false + label: Update sensor immediately + group: "Falcon Sensor Node Settings" + + - variable: falcon.message_log + description: "Enable message log (true/false)" + required: false + type: boolean + default: false + label: Enable logging + group: "Falcon Sensor Node Settings" + + - variable: falcon.billing + description: "Utilize default or metered billing. Should only be configured when needing to switch between the two. Options are: [default|metered]" + required: false + type: string + label: Configure Billing + group: "Falcon Sensor Node Settings" + + - variable: falcon.tags + description: "Comma separated list of tags for sensor grouping. Allowed characters: all alphanumerics, '/', '-', '_', and ','." + required: false + type: string + label: Configure tags for sensor grouping + group: "Falcon Sensor Node Settings" + + - variable: falcon.provisioning_token + description: "Used to protect the CID. Provisioning token value." + required: false + type: string + label: Set a provisioning installation token + group: "Falcon Sensor Node Settings" diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/templates/NOTES.txt b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/NOTES.txt new file mode 100644 index 000000000..4c3c475f2 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/NOTES.txt @@ -0,0 +1,10 @@ +Thank you for installing the CrowdStrike Falcon Helm Chart! + +Access to the Falcon Linux and Container Sensor downloads at https://falcon.crowdstrike.com/hosts/sensor-downloads are +required to complete the install of this Helm chart. This is provided automatically to all active CrowdStrike customers. +Additionally, a containerized sensor must be present in a container registry accessible from Kubernetes installation. 
+Sample Dockerfiles are available at https://github.com/CrowdStrike/Dockerfiles. +CrowdStrike Falcon sensors will deploy across all nodes in your Kubernetes cluster after +installing this Helm chart. An extremely common error on installation is accidentally +forgetting to add your containerized sensor to your local image registry prior to executing +`helm install`. The default image name to deploy a kernel sensor to a node is `falcon-node-sensor`. diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/templates/_helpers.tpl b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/_helpers.tpl new file mode 100644 index 000000000..316fee125 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "falcon-sensor.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "falcon-sensor.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "falcon-sensor.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "falcon-sensor.labels" -}} +helm.sh/chart: {{ include "falcon-sensor.chart" . }} +{{ include "falcon-sensor.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "falcon-sensor.selectorLabels" -}} +app.kubernetes.io/name: {{ include "falcon-sensor.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "falcon-sensor.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "falcon-sensor.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/templates/configmap.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/configmap.yaml new file mode 100644 index 000000000..69fba2e22 --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "falcon-sensor.fullname" . }}-config + namespace: {{ .Release.Namespace }} + labels: + app: "{{ include "falcon-sensor.fullname" . 
}}" + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + FALCONCTL_OPT_CID: {{ .Values.falcon.cid }} + {{- range $key, $value := .Values.falcon }} + {{- if and ($value) (ne $key "cid") }} + FALCONCTL_OPT_{{ $key | upper }}: {{ $value | quote }} + {{- end }} + {{- end }} diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/templates/daemonset.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/daemonset.yaml new file mode 100644 index 000000000..79da7e17c --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/templates/daemonset.yaml @@ -0,0 +1,134 @@ +{{- if .Values.node.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "falcon-sensor.fullname" . }} + labels: + name: {{ include "falcon-sensor.fullname" . }} + app: {{ include "falcon-sensor.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + {{- if .Values.node.daemonset.labels }} + {{- range $key, $value := .Values.node.daemonset.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.node.daemonset.annotations }} + annotations: + {{- range $key, $value := .Values.node.daemonset.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + name: {{ include "falcon-sensor.fullname" . }} + app: {{ include "falcon-sensor.fullname" . }} + release: {{ .Release.Name | quote }} + updateStrategy: + type: {{ .Values.node.daemonset.updateStrategy }} + template: + metadata: + annotations: + sensor.falcon-system.crowdstrike.com/injection: disabled + {{- range $key, $value := .Values.node.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + name: {{ include "falcon-sensor.fullname" . }} + app: {{ include "falcon-sensor.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + {{- if .Values.node.daemonset.labels }} + {{- range $key, $value := .Values.node.daemonset.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + {{- with .Values.node.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + tolerations: + # this toleration is to have the daemonset runnable on master nodes + - key: node-role.kubernetes.io/master + effect: NoSchedule + nodeSelector: + beta.kubernetes.io/os: linux + initContainers: + # This init container creates empty falconstore file so that when + # it's mounted into the sensor-node-container, k8s would just use it + # rather than creating a directory. Mounting falconstore file as + # a file volume ensures that AID is preserved across container + # restarts. + - name: init-falconstore + image: busybox + args: [/bin/sh, -c, 'touch /var/lib/crowdstrike/falconstore'] + volumeMounts: + - name: falconstore-dir + mountPath: /var/lib/crowdstrike + containers: + - name: falcon-node-sensor + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + imagePullPolicy: "{{ .Values.node.image.pullPolicy }}" + volumeMounts: + - name: dev + mountPath: /dev + - name: var-run + mountPath: /var/run + - name: etc + mountPath: /etc + - name: var-log + mountPath: /var/log + - name: falconstore + mountPath: /opt/CrowdStrike/falconstore + # Various pod security context settings. 
Bear in mind that many of these have an impact + # on the Falcon Sensor working correctly. + # + # - User that the container will execute as. Typically necessary to run as root (0). + # - Runs the Falcon Sensor containers as privileged containers. This is required when + # running the Falcon Linux Sensor on Kubernetes nodes to properly run in the node's + # kernel and to actually protect the node. + securityContext: + runAsUser: 0 + privileged: true + readOnlyRootFilesystem: false + allowPrivilegeEscalation: true + envFrom: + - configMapRef: + name: {{ include "falcon-sensor.fullname" . }}-config + # This spits out logs from sensor-node-container to stdout so that they + # are routed through k8s log driver. + - name: log + image: busybox + args: [/bin/sh, -c, 'tail -n1 -f /var/log/falcon-sensor.log'] + volumeMounts: + - name: var-log + mountPath: /var/log + readOnly: True + volumes: + - name: dev + hostPath: + path: /dev + - name: etc + hostPath: + path: /etc + - name: var-run + hostPath: + path: /var/run + - name: var-log + emptyDir: {} + - name: falconstore + hostPath: + path: /var/lib/crowdstrike/falconstore + - name: falconstore-dir + hostPath: + path: /var/lib/crowdstrike + terminationGracePeriodSeconds: {{ .Values.node.terminationGracePeriod }} + hostNetwork: true + hostPID: true + hostIPC: true +{{- end }} diff --git a/charts/falcon-sensor/falcon-sensor/0.9.300/values.yaml b/charts/falcon-sensor/falcon-sensor/0.9.300/values.yaml new file mode 100644 index 000000000..1c1c8c1fe --- /dev/null +++ b/charts/falcon-sensor/falcon-sensor/0.9.300/values.yaml @@ -0,0 +1,50 @@ +# Default values for falcon-sensor. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +node: + # When enabled, the Helm chart deploys the Falcon Sensors to Kubernetes nodes + enabled: true + + daemonset: + # Annotations to apply to the daemonset + annotations: {} + + # additional labels + labels: {} + + updateStrategy: RollingUpdate + + image: + repository: falcon-node-sensor + pullPolicy: Always + pullSecrets: {} + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + + # Override various naming aspects of this chart + # Only edit these if you know what you're doing + nameOverride: "" + fullnameOverride: "" + + podAnnotations: {} + + # How long to wait for Falcon pods to stop gracefully + terminationGracePeriod: 10 + +falcon: + cid: + aid: + apd: + aph: + app: + trace: + feature: + update: + message_log: + billing: + tags: + assert: + memfail_grace_period: + memfail_every_n: + provisioning_token: diff --git a/charts/federatorai/federatorai/4.5.100/.helmignore b/charts/federatorai/federatorai/4.5.100/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/federatorai/federatorai/4.5.100/Chart.yaml b/charts/federatorai/federatorai/4.5.100/Chart.yaml new file mode 100644 index 000000000..dd2812c46 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/Chart.yaml @@ -0,0 +1,25 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Federator.ai + catalog.cattle.io/release-name: federatorai +apiVersion: v1 +appVersion: 4.5.1-ga +description: Federator.ai helps enterprises optimize cloud resources, maximize application + performance, and save significant cost without excessive over-provisioning or under-provisioning + of resources, meeting the service-level requirements of their applications. +home: https://www.prophetstor.com +icon: https://raw.githubusercontent.com/prophetstor-ai/public/master/images/logo.png +keywords: +- AI +- Resource Orchestration +- NoOps +- AIOps +- Intelligent Workload Management +- Cost Optimization +maintainers: +- email: support@prophetstor.com + name: ProphetStor Data Services, Inc. +name: federatorai +sources: +- https://www.prophetstor.com +version: 4.5.100 diff --git a/charts/federatorai/federatorai/4.5.100/README.md b/charts/federatorai/federatorai/4.5.100/README.md new file mode 100644 index 000000000..6e13bf0bb --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/README.md @@ -0,0 +1,96 @@ +# Federator.ai Operator +Federator.ai helps enterprises optimize cloud resources, maximize application performance, and save significant cost without excessive over-provisioning or under-provisioning of resources, meeting the service-level requirements of their applications. + +Enterprises often lack understanding of the resources needed to support their applications. This leads to either excessive over-provisioning or under-provisioning of resources (CPU, memory, storage). Using machine learning, Federator.ai determines the optimal cloud resources needed to support any workload on OpenShift and helps users find the best-cost instances from cloud providers for their applications. + + +**Multi-layer workload prediction** + +Using machine learning and math-based algorithms, Federator.ai predicts containerized application and cluster node resource usage as the basis for resource recommendations at application level as well as at cluster node level. Federator.ai supports prediction for both physical/virtual CPUs and memories. + + +**Auto-scaling via resource recommendation** + +Federator.ai utilizes the predicted resource usage to recommend the right number and size of pods for applications. Integrated with Datadog's WPA, applications are automatically scaled to meet the predicted resource usage. + + +**Application-aware recommendation execution** + +Optimizing the resource usage and performance goals, Federator.ai uses application specific metrics for workload prediction and pod capacity estimation to auto-scale the right number of pods for best performance without overprovisioning. + + +**Multi-cloud Cost Analysis** + +With resource usage prediction, Federator.ai analyzes potential cost of a cluster on different public cloud providers. It also recommends appropriate cluster nodes and instance types based on resource usage.
+ + +**Custom Datadog/Sysdig Dashboards** + +Predefined custom Datadog/Sysdig Dashboards for workload prediction/recommendation visualization for cluster nodes and applications. + + +**Additional resources** + +Want more product information? Explore detailed information about using this product and where to find additional help. + +* [Federator.ai Datasheet](https://www.prophetstor.com/wp-content/uploads/2021/02/Federator.ai%C2%AE_202102ver.pdf) +* [Quick Start Guide](https://www.prophetstor.com/wp-content/uploads/2021/04/ProphetStor-Federator.ai-v4.5.1-Quick-Installation-Guide.pdf) +* [Federator.ai User Guide](https://www.prophetstor.com/wp-content/uploads/2021/04/Federator.ai-4.5.1-User-Guide.pdf) +* [Company Information](https://www.prophetstor.com/) + +## Prerequisites +- [Kubernetes](https://kubernetes.io/) version 1.18 or later if using a Kubernetes environment. +- [OpenShift](https://www.openshift.com) version 4.x.x or later if using the OpenShift platform. +- [Helm](https://helm.sh/) version 3.x.x or later. + +## Add Helm chart repository +``` +helm repo add prophetstor https://prophetstor-ai.github.io/federatorai-operator-helm/ +``` + +## Test the Helm chart repository +``` +helm search repo federatorai +``` + +## Installing with the release name `my-name`: +``` +helm install `my-name` prophetstor/federatorai --namespace=federatorai --create-namespace +``` + +## To uninstall/delete the `my-name` deployment: +``` +helm ls --all-namespaces +helm delete `my-name` --namespace=federatorai +``` + + +## Configuration + +The following table lists the configurable parameters of the chart; their default values are specified inside values.yaml. + +| Parameter | Description | +| -------------------------------------------------------------- | --------------------------------------------- | +| `image.pullPolicy` | Container pull policy | +| `image.repository` | Image for Federator.ai operator | +| `image.tag` | Image Tag for Federator.ai operator | +| `federatorai.imageLocation` | Image Location for services containers | +| `federatorai.version` | Image Tag for services containers | +| `federatorai.persistence.enabled` | Enable persistent volumes | +| `federatorai.persistence.storageClass` | Storage Class Name of persistent volumes | +| `federatorai.persistence.storages.logStorage.size` | Log volume size | +| `federatorai.persistence.aiCore.dataStorage.size` | AICore data volume size | +| `federatorai.persistence.influxdb.dataStorage.size` | Influxdb data volume size | +| `federatorai.persistence.fedemeterInfluxdb.dataStorage.size` | Fedemeter influxdb data volume size | +| `services.dashboardFrontend.nodePort` | Port of the Dashboard service | +| `services.rest.nodePort` | Port of the REST service | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart.
For example, + +```shell +helm install `my-name` prophetstor/federatorai -f values.yaml --namespace=federatorai --create-namespace +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/charts/federatorai/federatorai/4.5.100/app-readme.md b/charts/federatorai/federatorai/4.5.100/app-readme.md new file mode 100644 index 000000000..a7612f8f4 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/app-readme.md @@ -0,0 +1,39 @@ +# Federator.ai Operator +Federator.ai helps enterprises optimize cloud resources, maximize application performance, and save significant cost without excessive over-provisioning or under-provisioning of resources, meeting the service-level requirements of their applications. + +Enterprises often lack understanding of the resources needed to support their applications. This leads to either excessive over-provisioning or under-provisioning of resources (CPU, memory, storage). Using machine learning, Federator.ai determines the optimal cloud resources needed to support any workload on OpenShift and helps users find the best-cost instances from cloud providers for their applications. + + +**Multi-layer workload prediction** + +Using machine learning and math-based algorithms, Federator.ai predicts containerized application and cluster node resource usage as the basis for resource recommendations at application level as well as at cluster node level. Federator.ai supports prediction for both physical/virtual CPUs and memories. + + +**Auto-scaling via resource recommendation** + +Federator.ai utilizes the predicted resource usage to recommend the right number and size of pods for applications. Integrated with Datadog's WPA, applications are automatically scaled to meet the predicted resource usage. + + +**Application-aware recommendation execution** + +Optimizing the resource usage and performance goals, Federator.ai uses application specific metrics for workload prediction and pod capacity estimation to auto-scale the right number of pods for best performance without overprovisioning. + + +**Multi-cloud Cost Analysis** + +With resource usage prediction, Federator.ai analyzes potential cost of a cluster on different public cloud providers. It also recommends appropriate cluster nodes and instance types based on resource usage. + + +**Custom Datadog/Sysdig Dashboards** + +Predefined custom Datadog/Sysdig Dashboards for workload prediction/recommendation visualization for cluster nodes and applications. + + +**Additional resources** + +Want more product information? Explore detailed information about using this product and where to find additional help.
+ +* [Federator.ai Datasheet](https://www.prophetstor.com/wp-content/uploads/2021/02/Federator.ai%C2%AE_202102ver.pdf) +* [Quick Start Guide](https://www.prophetstor.com/wp-content/uploads/2021/04/ProphetStor-Federator.ai-v4.5.1-Quick-Installation-Guide.pdf) +* [Federator.ai User Guide](https://www.prophetstor.com/wp-content/uploads/2021/04/Federator.ai-4.5.1-User-Guide.pdf) +* [Company Information](https://www.prophetstor.com/) diff --git a/charts/federatorai/federatorai/4.5.100/crds/02-alamedaservice.crd.yaml b/charts/federatorai/federatorai/4.5.100/crds/02-alamedaservice.crd.yaml new file mode 100644 index 000000000..7afa71628 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/crds/02-alamedaservice.crd.yaml @@ -0,0 +1,5009 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + helm.sh/hook: crd-install + creationTimestamp: null + name: alamedaservices.federatorai.containers.ai +spec: + additionalPrinterColumns: + - JSONPath: .spec.enableExecution + name: Execution + type: boolean + - JSONPath: .spec.version + name: Version + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: federatorai.containers.ai + names: + kind: AlamedaService + listKind: AlamedaServiceList + plural: alamedaservices + singular: alamedaservice + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: AlamedaService is the Schema for the alamedaservices API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlamedaServiceSpec defines the desired state of AlamedaService + properties: + alameda-analyzer: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alameda-dispatcher: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alameda-weavescope: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaAdmissionController: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaAi: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaDatahub: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaEvictioner: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaExecutor: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaInfluxdb: + description: Component Section Schema + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaNotifier: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaOperator: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaRabbitMQ: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + alamedaRecommender: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + autoPatchPrometheusRules: + type: boolean + enableAgentApp: + type: boolean + enableExecution: + type: boolean + enableGPU: + type: boolean + enablePreloader: + type: boolean + enableWeavescope: + type: boolean + env: + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources + limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + fedemeter: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + fedemeterInfluxdb: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiAgent: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiAgentApp: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiAgentGPU: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + influxDB: + properties: + address: + type: string + password: + type: string + username: + type: string + required: + - address + - password + - username + type: object + prometheus: + properties: + address: + type: string + password: + type: string + username: + type: string + required: + - address + - password + - username + type: object + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiAgentPreloader: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiBackend: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiDataAdapter: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. 
If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiFrontend: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiRecommendDispatcher: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. 
If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiRecommendWorker: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + federatoraiRest: + properties: + bootstrap: + properties: + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + version: + type: string + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. 
The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + type: string + imagepullpolicy: + description: PullPolicy describes a policy for if/when to pull a + container image + type: string + replicas: + format: int32 + minimum: 0 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + type: object + imageLocation: + type: string + kafka: + properties: + brokerAddresses: + items: + type: string + type: array + sasl: + properties: + enabled: + type: boolean + password: + type: string + username: + type: string + type: object + tls: + description: Version string `json:"version,omitempty"` + properties: + enabled: + type: boolean + insecureSkipVerify: + type: boolean + type: object + type: object + keycode: + description: KeycodeSpec contains data for keycode check + properties: + codeNumber: + description: CodeNumber provides user to apply keycode to Federator.ai + type: string + signatureData: + description: SignatureData provides user to apply signature data + type: string + required: + - codeNumber + type: object + platform: + enum: + - openshift3.9 + type: string + prometheusService: + type: string + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selfDriving: + type: boolean + serviceExposures: + items: + description: ServiceExposureSpec defines the service to be exposed + properties: + name: + type: string + nodePort: + description: NodePortSpec defines the ports to be proxied from + node to service + properties: + ports: + items: + description: PortSpec defines the service port + properties: + nodePort: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + port: + format: int32 + maximum: 65535 + minimum: 0 + type: integer + required: + - nodePort + - port + type: object + type: array + required: + - ports + type: object + type: + description: ServiceExposureType defines the type of the service + to be exposed + enum: + - NodePort + type: string + required: + - name + - type + type: object + type: array + storages: + items: + properties: + accessModes: + items: + type: string + type: array + class: + type: string + size: + type: string + type: + enum: + - pvc + - ephemeral + type: string + usage: + enum: + - log + - data + type: string + required: + - type + - usage + type: object + type: array + version: + type: string + required: + - keycode + - version + type: object + status: + description: AlamedaServiceStatus defines the observed state of AlamedaService + properties: + conditions: + items: + properties: + message: + type: string + paused: + description: Represents whether any actions on the underlaying + managed objects are being performed. Only delete actions will + be performed. + type: boolean + required: + - message + - paused + type: object + type: array + crdversion: + properties: + crdname: + type: string + scalerversion: + type: string + required: + - crdname + - scalerversion + type: object + keycodeStatus: + description: KeycodeStatus contains current keycode information + properties: + codeNumber: + description: CodeNumber represents the last keycode user successfully + applied + type: string + lastErrorMessage: + description: LastErrorMessage stores the error message that happend + when Federatorai-Operator handled keycode + type: string + registrationData: + description: RegistrationData contains data needs to be sent for + activating keycode + type: string + state: + description: State represents the state of keycode processing + type: string + summary: + description: Summary stores the summary of the keycode + type: string + required: + - codeNumber + - lastErrorMessage + - registrationData + - state + - summary + type: object + required: + - conditions + - crdversion + - keycodeStatus + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/federatorai/federatorai/4.5.100/logo.png b/charts/federatorai/federatorai/4.5.100/logo.png new file mode 100644 index 000000000..fc33e5009 Binary files /dev/null and b/charts/federatorai/federatorai/4.5.100/logo.png differ diff --git a/charts/federatorai/federatorai/4.5.100/questions.yaml b/charts/federatorai/federatorai/4.5.100/questions.yaml new file mode 100644 index 000000000..ab341a49c --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/questions.yaml @@ -0,0 +1,90 @@ +questions: +#image configurations +- variable: defaultImage + default: true + description: "Use default Federator.ai image or specify a custom one" + label: Use Default Federator.ai Image + type: 
boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: image.repository + default: "quay.io/prophetstor/federatorai-operator-ubi" + description: "Federator.ai Operator image name" + type: string + group: "Container Images" + label: Federator.ai Operator Image Name + - variable: image.tag + default: "v4.5.1-ga" + description: "Federator.ai Operator image tag" + type: string + group: "Container Images" + label: Federator.ai Operator Image Tag +#service configurations +- variable: federatorai.imageLocation + default: "quay.io/prophetstor" + description: "Service containers image location" + type: string + required: true + group: "Container Images" + label: Federator.ai imageLocation +- variable: federatorai.version + default: "v4.5.1-ga" + description: "Service containers version" + type: string + required: true + group: "Container Images" + label: Service Containers Image Tag +- variable: services.dashboardFrontend.nodePort + required: true + default: "31012" + description: "The port where the Federator.ai Dashboard listens to" + type: string + group: "Service Settings" + label: Federator.ai Dashboard Port +- variable: services.rest.nodePort + required: true + default: "31011" + description: "The port where the Federator.ai REST listens to" + type: string + group: "Service Settings" + label: Federator.ai REST Port +- variable: federatorai.persistence.enabled + default: true + description: "Enable persistent volume for Federator.ai" + type: boolean + required: true + label: Federator.ai Persistent Volume Enabled + show_subquestion_if: true + group: "PV Settings" + subquestions: + - variable: federatorai.persistence.storageClass + default: "" + description: "If undefined or set to null, using the default storageClass. Defaults to null." 
+ type: storageclass + group: "PV Settings" + label: Storage Class for Federator.ai + - variable: federatorai.persistence.storages.logStorage.size + default: "2Gi" + description: "Log volume size" + type: string + group: "PV Settings" + label: Log Volume Size + - variable: federatorai.persistence.aiCore.dataStorage.size + default: "10Gi" + description: "AICore data volume Size" + type: string + group: "PV Settings" + label: AICore Data Volume Size + - variable: federatorai.persistence.influxdb.dataStorage.size + default: "100Gi" + description: "Influxdb data volume Size" + type: string + group: "PV Settings" + label: Influxdb Data Volume Size + - variable: federatorai.persistence.fedemeterInfluxdb.dataStorage.size + default: "10Gi" + description: "Fedemeter influxdb data volume Size" + type: string + group: "PV Settings" + label: Fedemeter Influxdb Data Volume Size diff --git a/charts/federatorai/federatorai/4.5.100/requirements.yaml b/charts/federatorai/federatorai/4.5.100/requirements.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/charts/federatorai/federatorai/4.5.100/templates/01-serviceaccount.yaml b/charts/federatorai/federatorai/4.5.100/templates/01-serviceaccount.yaml new file mode 100644 index 000000000..937627cd3 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/01-serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: federatorai-operator + namespace: {{ .Release.Namespace }} diff --git a/charts/federatorai/federatorai/4.5.100/templates/03-federatorai-operator.deployment.yaml b/charts/federatorai/federatorai/4.5.100/templates/03-federatorai-operator.deployment.yaml new file mode 100644 index 000000000..d7ca3e7f3 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/03-federatorai-operator.deployment.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: federatorai-operator + namespace: {{ .Release.Namespace }} + labels: + name: federatorai-operator + app: Federator.ai + annotations: + "helm.sh/hook-weight": "1000" +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + name: federatorai-operator + template: + metadata: + labels: + name: federatorai-operator + app: Federator.ai + spec: + securityContext: + fsGroup: 1001 + serviceAccountName: federatorai-operator + initContainers: + - name: upgrader + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + command: + - federatorai-operator + args: + - "upgrade" + env: + - name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FEDERATORAI_OPERATOR_INFLUXDB_ADDRESS + value: "" + - name: FEDERATORAI_OPERATOR_INFLUXDB_SERVICE_NAME + value: alameda-influxdb + - name: FEDERATORAI_OPERATOR_INFLUXDB_SERVICE_PORT + value: "8086" + - name: FEDERATORAI_OPERATOR_INFLUXDB_USERNAME + value: admin + - name: FEDERATORAI_OPERATOR_INFLUXDB_PASSWORD + value: adminpass + volumeMounts: + - mountPath: /var/log/alameda + name: log + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + containers: + - name: federatorai-operator + # Replace this with the built image name + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + command: + - federatorai-operator + env: + - name: NAMESPACE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: 
OPERATOR_NAME + value: "federatorai-operator" + - name: DISABLE_OPERAND_RESOURCE_PROTECTION + value: "false" + readinessProbe: + failureThreshold: 20 + httpGet: + path: /readyz + port: 8083 + initialDelaySeconds: 5 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + volumeMounts: + - mountPath: /var/log/alameda + name: log + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: log + emptyDir: {} + - name: cert + secret: + defaultMode: 420 + secretName: federatorai-operator-service-cert diff --git a/charts/federatorai/federatorai/4.5.100/templates/04-clusterrole.yaml b/charts/federatorai/federatorai/4.5.100/templates/04-clusterrole.yaml new file mode 100644 index 000000000..2703f91a2 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/04-clusterrole.yaml @@ -0,0 +1,257 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: federatorai-operator +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + - pods + verbs: + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - persistentvolumeclaims + - serviceaccounts + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + - pods/log + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - delete + - update +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - list + - update + - watch +- apiGroups: + - "" + - extensions + resources: + - replicationcontrollers + verbs: + - '*' +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - analysis.containers.ai + - autoscaling.containers.ai + - federatorai.containers.ai + - notifying.containers.ai + - tenant.containers.ai + resources: + - '*' + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - get + - update +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments/scale + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - apps + - extensions + resources: + - deployments + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - list +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + verbs: + - '*' +- apiGroups: + - extensions + - policy + resourceNames: + - federatorai-alameda-weave-scope + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - monitoring.coreos.com + resources: + - prometheuses + verbs: + - list +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list + - update +- apiGroups: + - 
rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - clusterroles/finalizers + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - create + - delete + - get + - list + - update + - use + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - list + - watch +- apiGroups: + - volumesnapshot.external-storage.k8s.io + resources: + - volumesnapshotdatas + - volumesnapshots + verbs: + - list + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alameda-gc + annotations: + "helm.sh/hook-weight": "5000" + "helm.sh/hook": post-install,post-delete +rules: [] diff --git a/charts/federatorai/federatorai/4.5.100/templates/05-clusterrolebinding.yaml b/charts/federatorai/federatorai/4.5.100/templates/05-clusterrolebinding.yaml new file mode 100644 index 000000000..7c55d4828 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/05-clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: federatorai-operator +subjects: +- kind: ServiceAccount + name: federatorai-operator + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: federatorai-operator + apiGroup: rbac.authorization.k8s.io diff --git a/charts/federatorai/federatorai/4.5.100/templates/06-role.yaml b/charts/federatorai/federatorai/4.5.100/templates/06-role.yaml new file mode 100644 index 000000000..6e53fba0b --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/06-role.yaml @@ -0,0 +1,107 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + name: federatorai-operator + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - persistentvolumeclaims + - pods + - secrets + - services + verbs: + - '*' +- apiGroups: + - "" + resources: + - nodes + - persistentvolumeclaims + - persistentvolumes + - pods/log + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - statefulsets + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - apps + - extensions + resources: + - deployments + - deployments/scale + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - deployments/scale + verbs: + - update +- apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - '*' +- apiGroups: + - extensions + resourceNames: + - federatorai-alameda-weave-scope + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - get +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - list + - watch +- apiGroups: + - volumesnapshot.external-storage.k8s.io + resources: + - volumesnapshotdatas + - volumesnapshots + verbs: + - list + - watch diff --git a/charts/federatorai/federatorai/4.5.100/templates/07-rolebinding.yaml b/charts/federatorai/federatorai/4.5.100/templates/07-rolebinding.yaml new file mode 100644 index 000000000..e72f197b5 --- /dev/null 
+++ b/charts/federatorai/federatorai/4.5.100/templates/07-rolebinding.yaml @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: federatorai-operator + namespace: {{ .Release.Namespace }} +subjects: +- kind: ServiceAccount + name: federatorai-operator + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: federatorai-operator + apiGroup: rbac.authorization.k8s.io diff --git a/charts/federatorai/federatorai/4.5.100/templates/08-service.yaml b/charts/federatorai/federatorai/4.5.100/templates/08-service.yaml new file mode 100644 index 000000000..843abc896 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/08-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + component: federatorai-operator + name: federatorai-operator-service + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: 443 + targetPort: 50443 + selector: + name: federatorai-operator + app: Federator.ai diff --git a/charts/federatorai/federatorai/4.5.100/templates/09-secret.yaml b/charts/federatorai/federatorai/4.5.100/templates/09-secret.yaml new file mode 100644 index 000000000..e4beb366a --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/09-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: federatorai-operator-service-cert + namespace: {{ .Release.Namespace }} +data: +type: Opaque \ No newline at end of file diff --git a/charts/federatorai/federatorai/4.5.100/templates/10-mutatingwebhook.yaml b/charts/federatorai/federatorai/4.5.100/templates/10-mutatingwebhook.yaml new file mode 100644 index 000000000..71d502abe --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/10-mutatingwebhook.yaml @@ -0,0 +1,24 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: federatorai-operator-servicesmutation +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: federatorai-operator-service + namespace: {{ .Release.Namespace }} + path: /mutate-federatorai-containers-ai-v1alpha1-alamedaservice + failurePolicy: Ignore + name: alamedaservicemutate.federatorai.containers.ai + rules: + - apiGroups: + - federatorai.containers.ai + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - alamedaservices \ No newline at end of file diff --git a/charts/federatorai/federatorai/4.5.100/templates/11-validatingwebhook.yaml b/charts/federatorai/federatorai/4.5.100/templates/11-validatingwebhook.yaml new file mode 100644 index 000000000..bf568d17d --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/11-validatingwebhook.yaml @@ -0,0 +1,24 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: federatorai-operator-servicesvalidation +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: federatorai-operator-service + namespace: {{ .Release.Namespace }} + path: /validate-federatorai-containers-ai-v1alpha1-alamedaservice + failurePolicy: Ignore + name: alamedaservicevalidate.federatorai.containers.ai + rules: + - apiGroups: + - federatorai.containers.ai + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - alamedaservices \ No newline at end of file diff --git a/charts/federatorai/federatorai/4.5.100/templates/NOTES.txt b/charts/federatorai/federatorai/4.5.100/templates/NOTES.txt new file mode 100644 index 000000000..b381d6fed --- /dev/null +++ 
b/charts/federatorai/federatorai/4.5.100/templates/NOTES.txt @@ -0,0 +1,3 @@ + +Get the Federator.ai pods by running the following command: + kubectl --namespace {{ .Release.Namespace }} get pods diff --git a/charts/federatorai/federatorai/4.5.100/templates/_helpers.tpl b/charts/federatorai/federatorai/4.5.100/templates/_helpers.tpl new file mode 100644 index 000000000..66323e843 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/_helpers.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "federatorai-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "federatorai-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "federatorai-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "federatorai-operator.labels" -}} +app.kubernetes.io/name: {{ include "federatorai-operator.name" . }} +helm.sh/chart: {{ include "federatorai-operator.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/charts/federatorai/federatorai/4.5.100/templates/alamedaservice.yaml b/charts/federatorai/federatorai/4.5.100/templates/alamedaservice.yaml new file mode 100644 index 000000000..6fc00b321 --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/templates/alamedaservice.yaml @@ -0,0 +1,87 @@ +apiVersion: federatorai.containers.ai/v1alpha1 +kind: AlamedaService +metadata: + name: my-alamedaservice + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "3000" +spec: +{{ if .Values.federatorai.persistence.enabled }} + env: + - name: FEDERATORAI_MAXIMUM_LOG_SIZE + ## Use about 90% of 2Gi + value: "1931476992" +{{ end }} + keycode: + codeNumber: FFSFM-RGBTF-F7I7O-UFKPM-LL6VI-QVFNQ + version: {{ .Values.federatorai.version }} +{{ if .Values.federatorai.imageLocation }} + imageLocation: {{ .Values.federatorai.imageLocation }} +{{ else }} + imageLocation: quay.io/prophetstor +{{ end }} +{{ if .Values.federatorai.persistence.enabled }} + storages: + - accessModes: + - ReadWriteOnce + {{ if .Values.federatorai.persistence.storageClass }} + class: {{ .Values.federatorai.persistence.storageClass }} + {{ end }} + size: {{ .Values.federatorai.persistence.storages.logStorage.size }} + type: pvc + usage: log + alamedaAi: + storages: + - accessModes: + - ReadWriteOnce + {{ if .Values.federatorai.persistence.storageClass }} + class: {{ .Values.federatorai.persistence.storageClass }} + {{ end }} + size: {{ .Values.federatorai.persistence.aiCore.dataStorage.size }} + type: pvc + usage: data + alamedaInfluxdb: + storages: + - accessModes: + - ReadWriteOnce + {{ if .Values.federatorai.persistence.storageClass }} + class: {{ .Values.federatorai.persistence.storageClass }} + {{ end }} + size: {{ .Values.federatorai.persistence.influxdb.dataStorage.size }} + type: pvc + usage: data + fedemeterInfluxdb: + storages: + - accessModes: + - ReadWriteOnce + {{ if .Values.federatorai.persistence.storageClass }} + class: {{ .Values.federatorai.persistence.storageClass }} + {{ end }} + size: {{ .Values.federatorai.persistence.fedemeterInfluxdb.dataStorage.size }} + type: pvc + usage: data +{{ else }} + storages: + - type: ephemeral + usage: data + - type: ephemeral + usage: log +{{ end }} + serviceExposures: +{{ if .Values.services.dashboardFrontend.nodePort }} + - name: federatorai-dashboard-frontend + nodePort: + ports: + - nodePort: {{ .Values.services.dashboardFrontend.nodePort }} + port: 9001 + type: NodePort +{{ end }} +{{ if .Values.services.rest.nodePort }} + - name: federatorai-rest + nodePort: + ports: + - nodePort: {{ .Values.services.rest.nodePort }} + port: 5056 + type: NodePort +{{ end }} diff --git a/charts/federatorai/federatorai/4.5.100/values.yaml b/charts/federatorai/federatorai/4.5.100/values.yaml new file mode 100644 index 000000000..40414637a --- /dev/null +++ b/charts/federatorai/federatorai/4.5.100/values.yaml @@ -0,0 +1,41 @@ +## Default values for Federator.ai +## This is a YAML-formatted file. +## Declare variables to be passed into your templates. 
+## +image: + pullPolicy: IfNotPresent + repository: quay.io/prophetstor/federatorai-operator-ubi + tag: v4.5.1-ga + +## Set default values +## +federatorai: + imageLocation: quay.io/prophetstor + version: v4.5.1-ga + ## If persistence is enabled, a default StorageClass + ## is needed in the k8s cluster to provision volumes. + persistence: + enabled: true + storageClass: "" + storages: + logStorage: + size: 2Gi + aiCore: + dataStorage: + size: 10Gi + influxdb: + dataStorage: + size: 100Gi + fedemeterInfluxdb: + dataStorage: + size: 10Gi + +services: + dashboardFrontend: + ## Specify the nodePort value for the dashboard frontend + ## Comment out the following line to disable nodePort service + nodePort: 31012 + rest: + ## Specify the nodePort value for the REST service + ## Comment out the following line to disable nodePort service + nodePort: 31011 diff --git a/charts/haproxy/haproxy/1.12.100/.helmignore b/charts/haproxy/haproxy/1.12.100/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/haproxy/haproxy/1.12.100/Chart.yaml b/charts/haproxy/haproxy/1.12.100/Chart.yaml new file mode 100644 index 000000000..f29ee9c29 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy +apiVersion: v1 +appVersion: 1.5.1 +description: A Helm chart for HAProxy Kubernetes Ingress Controller +home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress +icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png +keywords: +- ingress +- haproxy +kubeVersion: '>=1.12.0-0' +maintainers: +- email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi +- email: bassmann@haproxy.com + name: Baptiste Assmann +- email: dkorunic@haproxy.com + name: Dinko Korunic +name: haproxy +sources: +- https://github.com/haproxytech/kubernetes-ingress +version: 1.12.100 diff --git a/charts/haproxy/haproxy/1.12.100/README.md b/charts/haproxy/haproxy/1.12.100/README.md new file mode 100644 index 000000000..d4f04dbb8 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/README.md @@ -0,0 +1,199 @@ +# ![HAProxy](https://github.com/haproxytech/kubernetes-ingress/raw/master/assets/images/haproxy-weblogo-210x49.png "HAProxy") + +## HAProxy Kubernetes Ingress Controller + +An ingress controller is a Kubernetes resource that routes traffic from outside your cluster to services within the cluster. HAProxy Kubernetes Ingress Controller uses ConfigMap to store the haproxy configuration. + +Detailed documentation can be found within the [Official Documentation](https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/). + +Additional configuration details can be found in [annotation reference](https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation) and in image [arguments reference](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md).
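As a quick illustration of that ConfigMap-driven configuration (a minimal sketch, not part of the chart files above; the object name is a placeholder for whatever the chart's fullname helper renders, and the `rate-limit` key is borrowed from the chart's own CI values):

```yaml
# Sketch of the ConfigMap this chart renders from .Values.controller.config;
# each key/value under controller.config becomes an entry under data.
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-release-haproxy      # placeholder name, assumed for illustration only
  namespace: default
data:
  rate-limit: "ON"              # example key taken from the chart's ci/ values files
```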
+ +## Introduction + +This chart bootstraps an HAProxy kubernetes-ingress deployment/daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +### Prerequisites + +- Kubernetes 1.12+ +- Helm 2.9+ + +## Before you begin + +### Setup a Kubernetes Cluster + +The quickest way to set up a Kubernetes cluster is with [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/), [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/) or [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) using their respective quick-start guides. + +For setting up Kubernetes on other cloud platforms or bare-metal servers, refer to the Kubernetes [getting started guide](http://kubernetes.io/docs/getting-started-guides/). + +### Install Helm + +Get the latest [Helm release](https://github.com/helm/helm#install). + +### Add Helm chart repo + +Once you have Helm installed, add the repo as follows: + +```console +helm repo add haproxytech https://haproxytech.github.io/helm-charts +helm repo update +``` + +## Install the chart + +To install the chart with Helm v3 as the *my-release* deployment: + +```console +helm install my-release haproxytech/kubernetes-ingress +``` + +***NOTE***: To install the chart with Helm v2 (legacy Helm), the syntax requires passing the deployment name with the `--name` parameter: + +```console +helm install haproxytech/kubernetes-ingress \ + --name my-release +``` + +### Installing with unique name + +To auto-generate the controller and resource names when installing, use the following: + +```console +helm install haproxytech/kubernetes-ingress \ + --generate-name +``` + +### Installing from a private registry + +To install the chart using a private registry for the controller into a separate namespace *prod*: + +***NOTE***: Helm v3 requires the namespace to be precreated (e.g. with ```kubectl create namespace prod```) + +```console +helm install my-ingress haproxytech/kubernetes-ingress \ + --namespace prod \ + --set controller.image.tag=SOMETAG \ + --set controller.imageCredentials.registry=myregistry.domain.com \ + --set controller.imageCredentials.username=MYUSERNAME \ + --set controller.imageCredentials.password=MYPASSWORD +``` + +### Installing as DaemonSet + +The default controller mode is [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), but it is possible to use [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) as well: + +```console +helm install my-ingress2 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet +``` + +### Installing in multi-ingress environment + +It is also possible to set the controller ingress class for use in [multi-ingress environments](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/#using-multiple-ingress-controllers): + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy +``` + +***NOTE***: make sure your Ingress routes have the corresponding `ingress.class: haproxy` annotation. + +### Installing with service annotations + +In some environments, such as EKS and GKE, there might be a need to pass service annotations.
The syntax can become a little tedious, however: + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy \ + --set controller.service.type=LoadBalancer \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-internal"="0.0.0.0/0" \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled"="true" +``` + +***NOTE***: With helm `--set` you need to quote and escape dots in the annotation key and commas in the value string. + +### Installing with Horizontal Pod Autoscaler + +[HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) automatically scales the number of replicas in a Deployment or Replication Controller. Therefore we want to unset the default replicaCount for the controller and defaultBackend by setting the corresponding key values to null: + +```console +helm install my-ingress4 haproxytech/kubernetes-ingress \ + --set controller.replicaCount=null \ + --set defaultBackend.replicaCount=null +``` + +### Installing the ServiceMonitor + +If you're using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), you can automatically install the `ServiceMonitor` definition in order to automate the scraping options according to your needs. + +```console +helm install my-ingress5 haproxytech/kubernetes-ingress \ + --set "controller.serviceMonitor.enabled=true" +``` + +### Using values from YAML file + +As opposed to using many `--set` invocations, a much simpler approach is to define value overrides in a separate YAML file and specify them when invoking Helm: + +*mylb.yaml*: + +```yaml +controller: + kind: DaemonSet + ingressClass: haproxy + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +``` + +And invoking Helm becomes (compare to the previous example): + +```console +helm install my-ingress4 -f mylb.yaml haproxytech/kubernetes-ingress +``` + +A typical YAML file for TCP services looks like this (provided that the configmap "[default/tcp](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md)" was created): + +```yaml +controller: + service: + tcpPorts: + - name: mysql + port: 3306 + targetPort: 3306 + extraArgs: + - --configmap-tcp-services=default/tcp +``` + +## Upgrading the chart + +To upgrade the *my-release* deployment: + +```console +helm upgrade my-release haproxytech/kubernetes-ingress +``` + +## Uninstalling the chart + +To uninstall/delete the *my-release* deployment: + +```console +helm delete my-release +``` + +## Debugging + +It is possible to generate a set of YAML files for testing/debugging: + +```console +helm install my-release haproxytech/kubernetes-ingress \ + --debug \ + --dry-run +``` + +## Contributing + +We welcome all contributions. Please refer to the [guidelines](../CONTRIBUTING.md) on how to make a contribution. diff --git a/charts/haproxy/haproxy/1.12.100/app-readme.md b/charts/haproxy/haproxy/1.12.100/app-readme.md new file mode 100644 index 000000000..aae3d1bd8 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/app-readme.md @@ -0,0 +1,8 @@ +# HAProxy +[HAProxy](https://www.haproxy.org/) is the world's fastest and most widely used software load balancer.
HAProxy allows organizations to deliver websites and applications with the utmost performance, observability, and security at any scale and in any environment. + +# HAProxy Enterprise +[HAProxy Enterprise](https://www.haproxy.com/products/haproxy-enterprise-edition/) is an enterprise-class version of HAProxy providing a robust and reliable code base with cutting edge features, an enterprise suite of add-ons, expert support, and professional services. At its core, it incorporates feature backports from the HAProxy development branch for customers who require immediate access to the latest functionality in a hardened version of code. + +## Introduction +This chart bootstraps the [HAProxy Ingress Controller](https://github.com/haproxytech/kubernetes-ingress) or the [HAProxy Enterprise Ingress Controller](https://www.haproxy.com/products/haproxy-enterprise-kubernetes-ingress-controller/) using the [Helm](https://helm.sh) package manager. diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-customconfig-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-customconfig-values.yaml new file mode 100644 index 000000000..116158a14 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-customnodeport-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 000000000..c9de04c16 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-default-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-default-values.yaml new file mode 100644 index 000000000..ddb25623a --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-default-values.yaml @@ -0,0 +1,2 @@ +controller: + kind: DaemonSet diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-disableddefaultbackend-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-disableddefaultbackend-values.yaml new file mode 100644 index 000000000..3a1687a33 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-disableddefaultbackend-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet +defaultBackend: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..362fbb982 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-disabledsecretconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + defaultTLSSecret: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-enableports-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-enableports-values.yaml new file mode 100644 index 000000000..9a41dac52 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-enableports-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-extraargs-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-extraargs-values.yaml new file mode 100644 index 000000000..691acbc44 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-extraargs-values.yaml @@ -0,0 
+1,4 @@ +controller: + kind: DaemonSet + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-hostport-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-hostport-values.yaml new file mode 100644 index 000000000..45042ea50 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-hostport-values.yaml @@ -0,0 +1,8 @@ +controller: + kind: DaemonSet + daemonset: + useHostPort: true + hostPorts: + http: 80 + https: 443 + stat: 1024 diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-nodeport-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-nodeport-values.yaml new file mode 100644 index 000000000..ebc8f1020 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-publishservice-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-publishservice-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-publishservice-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.12.100/ci/daemonset-serviceannotation-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/daemonset-serviceannotation-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/daemonset-serviceannotation-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-customconfig-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-customconfig-values.yaml new file mode 100644 index 000000000..12c48d22d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-customconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-customnodeport-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-customnodeport-values.yaml new file mode 100644 index 000000000..f044362aa --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-default-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-default-values.yaml new file mode 100644 index 000000000..792d60054 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-default-values.yaml @@ -0,0 +1 @@ +# diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-disableddefaultbackend-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-disableddefaultbackend-values.yaml new file mode 100644 index 000000000..ba2a61ebe --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-disableddefaultbackend-values.yaml @@ -0,0 +1,2 @@ +defaultBackend: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..767645997 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-disabledsecretconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + defaultTLSSecret: + enabled: false diff 
--git a/charts/haproxy/haproxy/1.12.100/ci/deployment-enableports-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-enableports-values.yaml new file mode 100644 index 000000000..03ff297b4 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-enableports-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-extraargs-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-extraargs-values.yaml new file mode 100644 index 000000000..d0e1dbe73 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-extraargs-values.yaml @@ -0,0 +1,3 @@ +controller: + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-hpa-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-hpa-values.yaml new file mode 100644 index 000000000..0c8326236 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-hpa-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: Deployment + autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +defaultBackend: + autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-nodeport-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-nodeport-values.yaml new file mode 100644 index 000000000..ffdc47b2d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-nodeport-values.yaml @@ -0,0 +1,3 @@ +controller: + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-psp-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-psp-values.yaml new file mode 100644 index 000000000..7aae8605d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-psp-values.yaml @@ -0,0 +1,2 @@ +podSecurityPolicy: + enabled: true diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-publishservice-values.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-publishservice-values.yaml new file mode 100644 index 000000000..6d8bf9bf7 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-publishservice-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + publishService: + enabled: true diff --git a/charts/haproxy/haproxy/1.12.100/ci/deployment-replicacount-unset.yaml b/charts/haproxy/haproxy/1.12.100/ci/deployment-replicacount-unset.yaml new file mode 100644 index 000000000..78ee30060 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/ci/deployment-replicacount-unset.yaml @@ -0,0 +1,5 @@ +controller: + replicaCount: null + +defaultBackend: + replicaCount: null diff --git a/charts/haproxy/haproxy/1.12.100/questions.yml b/charts/haproxy/haproxy/1.12.100/questions.yml new file mode 100644 index 000000000..27ff89e72 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/questions.yml @@ -0,0 +1,73 @@ +questions: +- variable: imageDefault + default: true + description: "Use default Docker image" + label: Use Default Image + type: boolean + group: "Settings" + show_subquestion_if: false + subquestions: + - variable: controller.image.tag + default: "1.4.6" + description: "HAProxy Ingress Controller Tag" + type: string + label: HAProxy Ingress Controller Tag +- variable: controller.kind + type: enum + options: + - "DaemonSet" + - "Deployment" + default: "Deployment" + description: "Deployment Type" + label: Deployment Type + group: "Settings" +- variable: controller.service.type + type: enum 
+ options: + - "LoadBalancer" + - "NodePort" + default: "NodePort" + description: "Service Type for HAProxy Ingress Controller" + label: Service Type + group: "Settings" +- variable: controller.ingressClass + default: "" + description: "Ingress Class for targeting this controller" + label: Ingress Class + type: string + group: "Settings" +- variable: controller.defaultTLSSecret.secret + default: "" + description: "Default TLS certificate secret" + label: TLS Certificate Secret + type: string + group: "Settings" +- variable: enableEnterprise + default: false + description: "Use HAProxy Enterprise" + label: Enable + type: boolean + group: "HAProxy Enterprise" + show_subquestion_if: true + subquestions: + - variable: controller.imageCredentials.registry + type: string + default: "kubernetes-registry.haproxy.com" + description: "HAProxy Enterprise Registry" + label: Registry + - variable: controller.image.repository + type: string + default: "kubernetes-registry.haproxy.com/hapee-ingress" + description: "HAProxy Enterprise Repository" + label: Repository + - variable: controller.imageCredentials.username + type: string + default: "MYUSERNAME" + description: "HAProxy Enterprise Username" + label: Username + - variable: controller.imageCredentials.password + type: string + default: "MYPASSWORD" + description: "HAProxy Enterprise Password" + label: Password + diff --git a/charts/haproxy/haproxy/1.12.100/templates/NOTES.txt b/charts/haproxy/haproxy/1.12.100/templates/NOTES.txt new file mode 100644 index 000000000..522e23017 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/NOTES.txt @@ -0,0 +1,67 @@ +HAProxy Kubernetes Ingress Controller has been successfully installed. + +Controller image deployed is: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}". +Your controller is of a "{{ .Values.controller.kind }}" kind. Your controller service is running as a "{{ .Values.controller.service.type }}" type. +{{- if and .Values.rbac.create}} +RBAC authorization is enabled. +{{- else}} +RBAC authorization is disabled. +{{- end}} +{{- if .Values.controller.ingressClass}} +Controller ingress.class is set to "{{ .Values.controller.ingressClass }}" so make sure to use the same annotation for +the Ingress resource. +{{- end}} + +Service ports mapped are: +{{- if eq .Values.controller.kind "Deployment" }} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + hostPort: {{ index $hostPorts $key | default $value }} +{{- end }} +{{- end }} + +Node IP can be found with: + $ kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}" + +The following ingress resource routes traffic to pods that match the following: + * service name: web + * client's Host header: webdemo.com + * path begins with / + + --- + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + name: web-ingress + namespace: default + spec: + rules: + - host: webdemo.com + http: + paths: + - path: / + backend: + serviceName: web + servicePort: 80 + +If you are using a multi-ingress controller environment, make sure to use the ingress.class annotation and match it +with the helm chart option controller.ingressClass.
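For instance (a sketch only, reusing the web-ingress example above; it assumes the controller was installed with controller.ingressClass=haproxy, as in the chart README):

```yaml
# Same web-ingress example as above, pinned to this controller via the
# ingress.class annotation referenced in the README and NOTES.
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: web-ingress
  namespace: default
  annotations:
    ingress.class: haproxy
spec:
  rules:
  - host: webdemo.com
    http:
      paths:
      - path: /
        backend:
          serviceName: web
          servicePort: 80
```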
+ +For more examples and up to date documentation, please visit: + * Helm chart documentation: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + * Controller documentation: https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/ + * Annotation reference: https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation + * Image parameters reference: https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md + + + diff --git a/charts/haproxy/haproxy/1.12.100/templates/_helpers.tpl b/charts/haproxy/haproxy/1.12.100/templates/_helpers.tpl new file mode 100644 index 000000000..5a1e28588 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/_helpers.tpl @@ -0,0 +1,130 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kubernetes-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubernetes-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubernetes-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Encode an imagePullSecret string. +*/}} +{{- define "kubernetes-ingress.imagePullSecret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.controller.imageCredentials.registry (printf "%s:%s" .Values.controller.imageCredentials.username .Values.controller.imageCredentials.password | b64enc) | b64enc }} +{{- end }} + +{{/* +Generate default certificate for HAProxy. +*/}} +{{- define "kubernetes-ingress.gen-certs" -}} +{{- $ca := genCA "kubernetes-ingress-ca" 365 -}} +{{- $cn := printf "%s.%s" .Release.Name .Release.Namespace -}} +{{- $cert := genSignedCert $cn nil nil 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Create the name of the controller service account to use. +*/}} +{{- define "kubernetes-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kubernetes-ingress.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "kubernetes-ingress.defaultBackend.serviceAccountName" -}} +{{- if or .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +*/}} +{{- define "kubernetes-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default cert secret name. +*/}} +{{- define "kubernetes-ingress.defaultTLSSecret.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) "default-cert" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. +By default this will use the / matching the controller's service name. +Users can provide an override for an explicit service they want to use via `.Values.controller.publishService.pathOverride` +*/}} +{{- define "kubernetes-ingress.publishServicePath" -}} +{{- $defServicePath := printf "%s/%s" .Release.Namespace (include "kubernetes-ingress.fullname" .) -}} +{{- $servicePath := default $defServicePath .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the syslog-server annotation +*/}} +{{- define "kubernetes-ingress.syslogServer" -}} +{{- range $key, $val := .Values.controller.logging.traffic -}} +{{- printf "%s:%s, " $key $val }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified ServiceMonitor name. +*/}} +{{- define "kubernetes-ingress.serviceMonitorName" -}} +{{- default (include "kubernetes-ingress.fullname" .) .Values.controller.serviceMonitor.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/clusterrole.yaml b/charts/haproxy/haproxy/1.12.100/templates/clusterrole.yaml new file mode 100644 index 000000000..4f9b4734a --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/clusterrole.yaml @@ -0,0 +1,60 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - services + - namespaces + - events + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses + - ingresses/status + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/clusterrolebinding.yaml b/charts/haproxy/haproxy/1.12.100/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..cfd226083 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/clusterrolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} + diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-configmap.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-configmap.yaml new file mode 100644 index 000000000..94aa9c554 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-configmap.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +data: +{{- if .Values.controller.logging.traffic }} + syslog-server: {{ template "kubernetes-ingress.syslogServer" . }} +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-daemonset.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-daemonset.yaml new file mode 100644 index 000000000..e892c4b8d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-daemonset.yaml @@ -0,0 +1,234 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork -}} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.extraLabels }} +{{ toYaml .Values.controller.extraLabels | indent 4 }} + {{- end }} +spec: + minReadySeconds: 0 + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if $useHostNetwork }} + hostNetwork: true + {{- end }} +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . }} +{{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . 
}}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if and .Values.controller.defaultTLSSecret.enabled -}} +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} +{{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end }} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . }} +{{- end }} + {{- if .Values.controller.unprivileged }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .port }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ .port }} + {{- end }} + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + startupProbe: + failureThreshold: {{ .Values.controller.startupProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.startupProbe.path }} + port: {{ .Values.controller.startupProbe.port }} + scheme: {{ .Values.controller.startupProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.startupProbe.periodSeconds }} + successThreshold: {{ .Values.controller.startupProbe.successThreshold }} + timeoutSeconds: {{ 
.Values.controller.startupProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range .Values.controller.extraEnvs }} + - name: "{{ .name }}" + value: "{{ .value }}" + {{- end }} + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- if .Values.controller.lifecycle }} + lifecycle: + {{- if eq "string" (printf "%T" .Values.controller.lifecycle) }} +{{ tpl .Values.controller.lifecycle . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + volumeMounts: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumeMounts) }} +{{ tpl .Values.controller.extraVolumeMounts . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12 }} + {{- end }} + {{- end}} + {{- if .Values.controller.extraContainers }} + {{- if eq "string" (printf "%T" .Values.controller.extraContainers) }} +{{ tpl .Values.controller.extraContainers . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraContainers | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + volumes: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumes) }} +{{ tpl .Values.controller.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if or .Values.controller.unprivileged .Values.controller.initContainers }} + initContainers: + {{- if .Values.controller.unprivileged }} + - name: sysctl + image: busybox:musl + command: + - /bin/sh + - -c + - sysctl -w net.ipv4.ip_unprivileged_port_start=0 + securityContext: + privileged: true + {{- end }} + {{- with.Values.controller.initContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-defaultcertsecret.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-defaultcertsecret.yaml new file mode 100644 index 000000000..b409c7b25 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-defaultcertsecret.yaml @@ -0,0 +1,35 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.defaultTLSSecret.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +data: +{{ ( include "kubernetes-ingress.gen-certs" . ) | indent 2 }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-deployment.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-deployment.yaml new file mode 100644 index 000000000..2868add91 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-deployment.yaml @@ -0,0 +1,222 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "Deployment" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.extraLabels }} +{{ toYaml .Values.controller.extraLabels | indent 4 }} + {{- end }} +spec: + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.controller.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . }} +{{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . 
}}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} +{{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end }} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . }} +{{- end }} + {{- if .Values.controller.unprivileged }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .targetPort }} + protocol: TCP + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + startupProbe: + failureThreshold: {{ .Values.controller.startupProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.startupProbe.path }} + port: {{ .Values.controller.startupProbe.port }} + scheme: {{ .Values.controller.startupProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.startupProbe.periodSeconds }} + successThreshold: {{ .Values.controller.startupProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.startupProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range .Values.controller.extraEnvs }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + 
{{- if .Values.controller.lifecycle }} + lifecycle: + {{- if eq "string" (printf "%T" .Values.controller.lifecycle) }} +{{ tpl .Values.controller.lifecycle . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + volumeMounts: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumeMounts) }} +{{ tpl .Values.controller.extraVolumeMounts . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12 }} + {{- end }} + {{- end}} + {{- if .Values.controller.extraContainers }} + {{- if eq "string" (printf "%T" .Values.controller.extraContainers) }} +{{ tpl .Values.controller.extraContainers . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraContainers | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + volumes: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumes) }} +{{ tpl .Values.controller.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if or .Values.controller.unprivileged .Values.controller.initContainers }} + initContainers: + {{- if .Values.controller.unprivileged }} + - name: sysctl + image: busybox:musl + command: + - /bin/sh + - -c + - sysctl -w net.ipv4.ip_unprivileged_port_start=0 + securityContext: + privileged: true + {{- end }} + {{- with.Values.controller.initContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-hpa.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-hpa.yaml new file mode 100644 index 000000000..102b23439 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-hpa.yaml @@ -0,0 +1,49 @@ +{{/* +Copyright 2020 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and (eq .Values.controller.kind "Deployment") .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "kubernetes-ingress.fullname" . 
}} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- if .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-podsecuritypolicy.yaml new file mode 100644 index 000000000..7851e2acf --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-podsecuritypolicy.yaml @@ -0,0 +1,80 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.fullname" . 
}} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 +{{- if $useHostNetwork }} + hostNetwork: true +{{- end }} +{{- if or $useHostPort $useHostNetwork }} + hostPorts: +{{- range $key, $value := .Values.controller.containerPort }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- range .Values.controller.service.tcpPorts }} + - min: {{ .port }} + max: {{ .port }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-pullsecret.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-pullsecret.yaml new file mode 100644 index 000000000..88252394c --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-pullsecret.yaml @@ -0,0 +1,32 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.imageCredentials.registry }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "kubernetes-ingress.imagePullSecret" . }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-role.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-role.yaml new file mode 100644 index 000000000..3e41df6e4 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-rolebinding.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-rolebinding.yaml new file mode 100644 index 000000000..40404a401 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-service.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-service.yaml new file mode 100644 index 000000000..eb2eea381 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . 
}} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + annotations: +{{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} +{{- end }} +spec: + {{ with .Values.controller.service.clusterIP }}clusterIP: {{ . }}{{ end }} + type: {{ .Values.controller.service.type }} + {{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} + {{- end }} + ports: + {{- if .Values.controller.service.enablePorts.http }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if .Values.controller.service.nodePorts.http }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.https }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if .Values.controller.service.nodePorts.https }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.stat }} + - name: stat + port: {{ .Values.controller.service.ports.stat }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.stat }} + {{- if .Values.controller.service.nodePorts.stat }} + nodePort: {{ .Values.controller.service.nodePorts.stat }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + port: {{ .port }} + protocol: TCP + targetPort: {{ .targetPort }} + {{- if .nodePort }} + nodePort: {{ .nodePort }} + {{- end }} + {{- end }} + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} + {{- end }} + externalIPs: +{{- if .Values.controller.service.externalIPs }} +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end -}} +{{- if (eq .Values.controller.service.type "LoadBalancer") }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-serviceaccount.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-serviceaccount.yaml new file mode 100644 index 000000000..c90710990 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/controller-servicemonitor.yaml b/charts/haproxy/haproxy/1.12.100/templates/controller-servicemonitor.yaml new file mode 100644 index 000000000..0f4c2c3af --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/controller-servicemonitor.yaml @@ -0,0 +1,41 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kubernetes-ingress.serviceMonitorName" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.serviceMonitor.extraLabels }} + {{ toYaml .Values.controller.serviceMonitor.extraLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + {{ .Values.controller.serviceMonitor.endpoints | toYaml | nindent 4 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-deployment.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-deployment.yaml new file mode 100644 index 000000000..9331f5f35 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-deployment.yaml @@ -0,0 +1,84 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.defaultBackend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + {{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podAnnotations }} + annotations: +{{ toYaml .Values.defaultBackend.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.containerPort }} + protocol: TCP + {{- if .Values.defaultBackend.extraEnvs }} + env: + {{- range .Values.defaultBackend.extraEnvs }} + - name: "{{ .name }}" + value: "{{ .value }}" + {{- end }} + {{- end }} + resources: + {{- toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- with .Values.defaultBackend.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.defaultBackend.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 + {{- with .Values.defaultBackend.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-hpa.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-hpa.yaml new file mode 100644 index 000000000..0fd8a65b7 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-hpa.yaml @@ -0,0 +1,49 @@ +{{/* +Copyright 2020 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.defaultBackend.autoscaling.enabled .Values.defaultBackend.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: + {{- if .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-podsecuritypolicy.yaml new file mode 100644 index 000000000..82397b57b --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-podsecuritypolicy.yaml @@ -0,0 +1,64 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . 
}} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + hostNetwork: false + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-role.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-role.yaml new file mode 100644 index 000000000..8475d04fc --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-rolebinding.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-rolebinding.yaml new file mode 100644 index 000000000..3a94e9418 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-service.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-service.yaml new file mode 100644 index 000000000..6e0cf0e98 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-service.yaml @@ -0,0 +1,40 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.defaultBackend.service.port }} + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.100/templates/default-backend-serviceaccount.yaml b/charts/haproxy/haproxy/1.12.100/templates/default-backend-serviceaccount.yaml new file mode 100644 index 000000000..3c0853b14 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if and .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.100/values.yaml b/charts/haproxy/haproxy/1.12.100/values.yaml new file mode 100644 index 000000000..0ca2b7932 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.100/values.yaml @@ -0,0 +1,157 @@ +controller: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 20 + minReplicas: 2 + targetCPUUtilizationPercentage: 80 + config: {} + containerPort: + http: 80 + https: 443 + stat: 1024 + daemonset: + hostPorts: + http: 80 + https: 443 + stat: 1024 + useHostNetwork: false + useHostPort: false + defaultTLSSecret: + enabled: true + secret: null + dnsConfig: {} + dnsPolicy: ClusterFirst + extraArgs: [] + extraContainers: [] + extraEnvs: [] + extraLabels: {} + extraVolumeMounts: [] + extraVolumes: [] + image: + pullPolicy: IfNotPresent + repository: haproxytech/kubernetes-ingress + tag: '{{ .Chart.AppVersion }}' + imageCredentials: + password: null + registry: null + username: null + ingressClass: null + initContainers: [] + kind: Deployment + lifecycle: {} + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + logging: + level: info + traffic: {} + name: controller + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + publishService: + enabled: false + pathOverride: "" + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + replicaCount: 2 + resources: + requests: + cpu: 100m + memory: 64Mi + service: + annotations: {} + enablePorts: + http: true + https: true + stat: true + enabled: true + externalIPs: [] + healthCheckNodePort: 0 + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: {} + ports: + http: 80 + https: 443 + stat: 1024 + targetPorts: + http: http + https: https + stat: stat + tcpPorts: [] + type: NodePort + serviceMonitor: + enabled: false + endpoints: + - path: /metrics + port: stat + scheme: http + extraLabels: {} + startupProbe: + failureThreshold: 20 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 1 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + strategy: {} + terminationGracePeriodSeconds: 60 + tolerations: [] + unprivileged: false +defaultBackend: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 2 + minReplicas: 1 + targetCPUUtilizationPercentage: 80 + containerPort: 8080 + enabled: true + extraEnvs: [] + image: + pullPolicy: IfNotPresent + repository: k8s.gcr.io/defaultbackend-amd64 + runAsUser: 65534 + tag: 1.5 + name: default-backend + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + replicaCount: 2 + resources: + requests: + cpu: 10m + memory: 16Mi + service: + port: 8080 + serviceAccount: + create: true + tolerations: [] +podSecurityPolicy: + annotations: {} + enabled: 
false +rbac: + create: true +serviceAccount: + create: true + name: null diff --git a/charts/haproxy/haproxy/1.12.500/.helmignore b/charts/haproxy/haproxy/1.12.500/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/haproxy/haproxy/1.12.500/Chart.yaml b/charts/haproxy/haproxy/1.12.500/Chart.yaml new file mode 100644 index 000000000..323874d61 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy +apiVersion: v1 +appVersion: 1.5.4 +description: A Helm chart for HAProxy Kubernetes Ingress Controller +home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress +icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png +keywords: +- ingress +- haproxy +kubeVersion: '>=1.12.0-0' +maintainers: +- email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi +- email: bassmann@haproxy.com + name: Baptiste Assmann +- email: dkorunic@haproxy.com + name: Dinko Korunic +name: haproxy +sources: +- https://github.com/haproxytech/kubernetes-ingress +version: 1.12.500 diff --git a/charts/haproxy/haproxy/1.12.500/README.md b/charts/haproxy/haproxy/1.12.500/README.md new file mode 100644 index 000000000..c7534e846 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/README.md @@ -0,0 +1,208 @@ +# ![HAProxy](https://github.com/haproxytech/kubernetes-ingress/raw/master/assets/images/haproxy-weblogo-210x49.png "HAProxy") + +## HAProxy Kubernetes Ingress Controller + +An ingress controller is a Kubernetes resource that routes traffic from outside your cluster to services within the cluster. HAProxy Kubernetes Ingress Controller uses ConfigMap to store the haproxy configuration. + +Detailed documentation can be found within the [Official Documentation](https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/). + +Additional configuration details can be found in [annotation reference](https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation) and in image [arguments reference](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md). + +## Introduction + +This chart bootstraps an HAProxy kubernetes-ingress deployment/daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +### Prerequisites + +- Kubernetes 1.12+ +- Helm 2.9+ + +## Before you begin + +### Setup a Kubernetes Cluster + +The quickest way to setup a Kubernetes cluster is with [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/), [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/) or [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) using their respective quick-start guides. + +For setting up Kubernetes on other cloud platforms or bare-metal servers refer to the Kubernetes [getting started guide](http://kubernetes.io/docs/getting-started-guides/). + +### Install Helm + +Get the latest [Helm release](https://github.com/helm/helm#install). 
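Before adding the chart repository, you can confirm the installed Helm client meets the version prerequisite listed above:

```console
helm version
```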
+ +### Add Helm chart repo + +Once you have Helm installed, add the repo as follows: + +```console +helm repo add haproxytech https://haproxytech.github.io/helm-charts +helm repo update +``` + +## Install the chart + +To install the chart with Helm v3 as a *my-release* deployment: + +```console +helm install my-release haproxytech/kubernetes-ingress +``` + +***NOTE***: To install the chart with Helm v2 (legacy Helm), the syntax requires passing the deployment name via the `--name` parameter: + +```console +helm install haproxytech/kubernetes-ingress \ + --name my-release +``` + +### Installing with unique name + +To auto-generate the controller and resource names when installing, use the following: + +```console +helm install haproxytech/kubernetes-ingress \ + --generate-name +``` + +### Installing from a private registry + +To install the chart using a private registry for the controller into a separate namespace *prod*: + +***NOTE***: Helm v3 requires the namespace to be created beforehand (e.g. with ```kubectl create namespace prod```) + +```console +helm install my-ingress haproxytech/kubernetes-ingress \ + --namespace prod \ + --set controller.image.tag=SOMETAG \ + --set controller.imageCredentials.registry=myregistry.domain.com \ + --set controller.imageCredentials.username=MYUSERNAME \ + --set controller.imageCredentials.password=MYPASSWORD +``` + +Alternatively, use a pre-configured (existing) imagePullSecret in the same namespace: + +```console +helm install my-ingress haproxytech/kubernetes-ingress \ + --namespace prod \ + --set controller.image.tag=SOMETAG \ + --set controller.existingImagePullSecret=name-of-existing-image-pull-secret +``` + +### Installing as DaemonSet + +The default controller mode is [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), but it is possible to use a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) as well: + +```console +helm install my-ingress2 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet +``` + +### Installing in multi-ingress environment + +It is also possible to set the controller ingress class to be used in [multi-ingress environments](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/#using-multiple-ingress-controllers): + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy +``` + +***NOTE***: make sure your Ingress routes have the corresponding `ingress.class: haproxy` annotation. + +### Installing with service annotations + +In some environments, such as EKS and GKE, you may need to pass service annotations. The syntax can become a little tedious, however: + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy \ + --set controller.service.type=LoadBalancer \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-internal"="0.0.0.0/0" \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled"="true" +``` + +***NOTE***: With helm `--set` you need to quote the annotation key, escape the dots in the key, and escape the commas in the value string. + +### Installing with Horizontal Pod Autoscaler + +[HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) automatically scales the number of replicas in a Deployment or Replication Controller.
Therefore we want to unset the default replicaCount for the controller and defaultBackend by setting the corresponding keys to null: + +```console +helm install my-ingress4 haproxytech/kubernetes-ingress \ + --set controller.replicaCount=null \ + --set defaultBackend.replicaCount=null +``` + +### Installing the ServiceMonitor + +If you're using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), you can automatically install the `ServiceMonitor` definition in order to automate the scraping options according to your needs. + +```console +helm install my-ingress5 haproxytech/kubernetes-ingress \ + --set "controller.serviceMonitor.enabled=true" +``` + +### Using values from YAML file + +As opposed to using many `--set` invocations, a much simpler approach is to define value overrides in a separate YAML file and specify them when invoking Helm: + +*mylb.yaml*: + +```yaml +controller: + kind: DaemonSet + ingressClass: haproxy + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +``` + +And invoking Helm becomes (compare to the previous example): + +```console +helm install my-ingress4 -f mylb.yaml haproxytech/kubernetes-ingress +``` + +A typical YAML file for TCP services looks like this (provided that the configmap "[default/tcp](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md)" has been created): + +```yaml +controller: + service: + tcpPorts: + - name: mysql + port: 3306 + targetPort: 3306 + extraArgs: + - --configmap-tcp-services=default/tcp +``` + +## Upgrading the chart + +To upgrade the *my-release* deployment: + +```console +helm upgrade my-release haproxytech/kubernetes-ingress +``` + +## Uninstalling the chart + +To uninstall/delete the *my-release* deployment: + +```console +helm delete my-release +``` + +## Debugging + +It is possible to generate a set of YAML files for testing/debugging: + +```console +helm install my-release haproxytech/kubernetes-ingress \ + --debug \ + --dry-run +``` + +## Contributing + +We welcome all contributions. Please refer to [guidelines](../CONTRIBUTING.md) on how to make a contribution. diff --git a/charts/haproxy/haproxy/1.12.500/app-readme.md b/charts/haproxy/haproxy/1.12.500/app-readme.md new file mode 100644 index 000000000..aae3d1bd8 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/app-readme.md @@ -0,0 +1,8 @@ +# HAProxy +[HAProxy](https://www.haproxy.org/) is the world's fastest and most widely used software load balancer. HAProxy allows organizations to deliver websites and applications with the utmost performance, observability, and security at any scale and in any environment. + +# HAProxy Enterprise +[HAProxy Enterprise](https://www.haproxy.com/products/haproxy-enterprise-edition/) is an enterprise-class version of HAProxy providing a robust and reliable code base with cutting edge features, an enterprise suite of add-ons, expert support, and professional services. At its core, it incorporates feature backports from the HAProxy development branch for customers who require immediate access to the latest functionality in a hardened version of code.
+ +## Introduction +This chart bootstraps the [HAProxy Ingress Controller](https://github.com/haproxytech/kubernetes-ingress) or the [HAProxy Enterprise Ingress Controller](https://www.haproxy.com/products/haproxy-enterprise-kubernetes-ingress-controller/) using the [Helm](https://helm.sh) package manager. diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-customconfig-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-customconfig-values.yaml new file mode 100644 index 000000000..116158a14 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-customnodeport-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 000000000..c9de04c16 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-default-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-default-values.yaml new file mode 100644 index 000000000..ddb25623a --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-default-values.yaml @@ -0,0 +1,2 @@ +controller: + kind: DaemonSet diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-disableddefaultbackend-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-disableddefaultbackend-values.yaml new file mode 100644 index 000000000..3a1687a33 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-disableddefaultbackend-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet +defaultBackend: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..362fbb982 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-disabledsecretconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + defaultTLSSecret: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-enableports-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-enableports-values.yaml new file mode 100644 index 000000000..9a41dac52 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-enableports-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-extraargs-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-extraargs-values.yaml new file mode 100644 index 000000000..691acbc44 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-extraargs-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-hostport-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-hostport-values.yaml new file mode 100644 index 000000000..45042ea50 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-hostport-values.yaml @@ -0,0 +1,8 @@ +controller: + kind: DaemonSet + daemonset: + useHostPort: true + hostPorts: + http: 80 + https: 443 + stat: 1024 diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-nodeport-values.yaml 
b/charts/haproxy/haproxy/1.12.500/ci/daemonset-nodeport-values.yaml new file mode 100644 index 000000000..ebc8f1020 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-publishservice-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-publishservice-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-publishservice-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.12.500/ci/daemonset-serviceannotation-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/daemonset-serviceannotation-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/daemonset-serviceannotation-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-customconfig-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-customconfig-values.yaml new file mode 100644 index 000000000..12c48d22d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-customconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-customnodeport-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-customnodeport-values.yaml new file mode 100644 index 000000000..f044362aa --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-default-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-default-values.yaml new file mode 100644 index 000000000..792d60054 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-default-values.yaml @@ -0,0 +1 @@ +# diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-disableddefaultbackend-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-disableddefaultbackend-values.yaml new file mode 100644 index 000000000..ba2a61ebe --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-disableddefaultbackend-values.yaml @@ -0,0 +1,2 @@ +defaultBackend: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..767645997 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-disabledsecretconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + defaultTLSSecret: + enabled: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-enableports-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-enableports-values.yaml new file mode 100644 index 000000000..03ff297b4 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-enableports-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-extraargs-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-extraargs-values.yaml new file mode 100644 index 
000000000..d0e1dbe73 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-extraargs-values.yaml @@ -0,0 +1,3 @@ +controller: + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-hpa-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-hpa-values.yaml new file mode 100644 index 000000000..0c8326236 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-hpa-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: Deployment + autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + +defaultBackend: + autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-nodeport-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-nodeport-values.yaml new file mode 100644 index 000000000..ffdc47b2d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-nodeport-values.yaml @@ -0,0 +1,3 @@ +controller: + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-psp-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-psp-values.yaml new file mode 100644 index 000000000..7aae8605d --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-psp-values.yaml @@ -0,0 +1,2 @@ +podSecurityPolicy: + enabled: true diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-publishservice-values.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-publishservice-values.yaml new file mode 100644 index 000000000..6d8bf9bf7 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-publishservice-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + publishService: + enabled: true diff --git a/charts/haproxy/haproxy/1.12.500/ci/deployment-replicacount-unset.yaml b/charts/haproxy/haproxy/1.12.500/ci/deployment-replicacount-unset.yaml new file mode 100644 index 000000000..78ee30060 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/ci/deployment-replicacount-unset.yaml @@ -0,0 +1,5 @@ +controller: + replicaCount: null + +defaultBackend: + replicaCount: null diff --git a/charts/haproxy/haproxy/1.12.500/questions.yml b/charts/haproxy/haproxy/1.12.500/questions.yml new file mode 100644 index 000000000..298326086 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/questions.yml @@ -0,0 +1,73 @@ +questions: +- variable: imageDefault + default: true + description: "Use default Docker image" + label: Use Default Image + type: boolean + group: "Settings" + show_subquestion_if: false + subquestions: + - variable: controller.image.tag + default: "1.5.4" + description: "HAProxy Ingress Controller Tag" + type: string + label: HAProxy Ingress Controller Tag +- variable: controller.kind + type: enum + options: + - "DaemonSet" + - "Deployment" + default: "Deployment" + description: "Deployment Type" + label: Deployment Type + group: "Settings" +- variable: controller.service.type + type: enum + options: + - "LoadBalancer" + - "NodePort" + default: "NodePort" + description: "Service Type for HAProxy Ingress Controller" + label: Service Type + group: "Settings" +- variable: controller.ingressClass + default: "" + description: "Ingress Class for targeting this controller" + label: Ingress Class + type: string + group: "Settings" +- variable: controller.defaultTLSSecret.secret + default: "" + description: "Default TLS certificate secret" + label: TLS Certificate Secret + type: string + group: "Settings" +- variable: enableEnterprise + default: false 
+ description: "Use HAProxy Enterprise" + label: Enable + type: boolean + group: "HAProxy Enterprise" + show_subquestion_if: true + subquestions: + - variable: controller.imageCredentials.registry + type: string + default: "kubernetes-registry.haproxy.com" + description: "HAProxy Enterprise Registtry" + label: Registry + - variable: controller.image.repository + type: string + default: "kubernetes-registry.haproxy.com/hapee-ingress" + description: "HAProxy Enterprise Registry" + label: Repository + - variable: controller.imageCredentials.username + type: string + default: "MYUSERNAME" + description: "HAProxy Enterprise Username" + label: Username + - variable: controller.imageCredentials.password + type: string + default: "MYPASSWORD" + description: "HAProxy Enterprise Password" + label: Password + diff --git a/charts/haproxy/haproxy/1.12.500/templates/NOTES.txt b/charts/haproxy/haproxy/1.12.500/templates/NOTES.txt new file mode 100644 index 000000000..522e23017 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/NOTES.txt @@ -0,0 +1,67 @@ +HAProxy Kubernetes Ingress Controller has been successfully installed. + +Controller image deployed is: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}". +Your controller is of a "{{ .Values.controller.kind }}" kind. Your controller service is running as a "{{ .Values.controller.service.type }}" type. +{{- if and .Values.rbac.create}} +RBAC authorization is enabled. +{{- else}} +RBAC authorization is disabled. +{{- end}} +{{- if .Values.controller.ingressClass}} +Controller ingress.class is set to "{{ .Values.controller.ingressClass }}" so make sure to use same annotation for +Ingress resource. +{{- end}} + +Service ports mapped are: +{{- if eq .Values.controller.kind "Deployment" }} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + hostPort: {{ index $hostPorts $key | default $value }} +{{- end }} +{{- end }} + +Node IP can be found with: + $ kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}" + +The following ingress resource routes traffic to pods that match the following: + * service name: web + * client's Host header: webdemo.com + * path begins with / + + --- + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + name: web-ingress + namespace: default + spec: + rules: + - host: webdemo.com + http: + paths: + - path: / + backend: + serviceName: web + servicePort: 80 + +In case that you are using multi-ingress controller environment, make sure to use ingress.class annotation and match it +with helm chart option controller.ingressClass. 
+ +For more examples and up to date documentation, please visit: + * Helm chart documentation: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + * Controller documentation: https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/ + * Annotation reference: https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation + * Image parameters reference: https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md + + + diff --git a/charts/haproxy/haproxy/1.12.500/templates/_helpers.tpl b/charts/haproxy/haproxy/1.12.500/templates/_helpers.tpl new file mode 100644 index 000000000..5a1e28588 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/_helpers.tpl @@ -0,0 +1,130 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kubernetes-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubernetes-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubernetes-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Encode an imagePullSecret string. +*/}} +{{- define "kubernetes-ingress.imagePullSecret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.controller.imageCredentials.registry (printf "%s:%s" .Values.controller.imageCredentials.username .Values.controller.imageCredentials.password | b64enc) | b64enc }} +{{- end }} + +{{/* +Generate default certificate for HAProxy. +*/}} +{{- define "kubernetes-ingress.gen-certs" -}} +{{- $ca := genCA "kubernetes-ingress-ca" 365 -}} +{{- $cn := printf "%s.%s" .Release.Name .Release.Namespace -}} +{{- $cert := genSignedCert $cn nil nil 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Create the name of the controller service account to use. +*/}} +{{- define "kubernetes-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kubernetes-ingress.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "kubernetes-ingress.defaultBackend.serviceAccountName" -}} +{{- if or .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +*/}} +{{- define "kubernetes-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default cert secret name. +*/}} +{{- define "kubernetes-ingress.defaultTLSSecret.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) "default-cert" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. +By default this will use the / matching the controller's service name. +Users can provide an override for an explicit service they want to use via `.Values.controller.publishService.pathOverride` +*/}} +{{- define "kubernetes-ingress.publishServicePath" -}} +{{- $defServicePath := printf "%s/%s" .Release.Namespace (include "kubernetes-ingress.fullname" .) -}} +{{- $servicePath := default $defServicePath .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the syslog-server annotation +*/}} +{{- define "kubernetes-ingress.syslogServer" -}} +{{- range $key, $val := .Values.controller.logging.traffic -}} +{{- printf "%s:%s, " $key $val }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified ServiceMonitor name. +*/}} +{{- define "kubernetes-ingress.serviceMonitorName" -}} +{{- default (include "kubernetes-ingress.fullname" .) .Values.controller.serviceMonitor.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/clusterrole.yaml b/charts/haproxy/haproxy/1.12.500/templates/clusterrole.yaml new file mode 100644 index 000000000..4f9b4734a --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/clusterrole.yaml @@ -0,0 +1,60 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - services + - namespaces + - events + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses + - ingresses/status + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + verbs: + - update +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/clusterrolebinding.yaml b/charts/haproxy/haproxy/1.12.500/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..cfd226083 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/clusterrolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} + diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-configmap.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-configmap.yaml new file mode 100644 index 000000000..94aa9c554 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-configmap.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +data: +{{- if .Values.controller.logging.traffic }} + syslog-server: {{ template "kubernetes-ingress.syslogServer" . }} +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-daemonset.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-daemonset.yaml new file mode 100644 index 000000000..5d2ae478a --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-daemonset.yaml @@ -0,0 +1,237 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork -}} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.extraLabels }} +{{ toYaml .Values.controller.extraLabels | indent 4 }} + {{- end }} +spec: + minReadySeconds: 0 + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if $useHostNetwork }} + hostNetwork: true + {{- end }} +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . 
}} +{{- else if .Values.controller.existingImagePullSecret }} + imagePullSecrets: + - name: {{ .Values.controller.existingImagePullSecret }} +{{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if and .Values.controller.defaultTLSSecret.enabled -}} +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} +{{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end }} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . }} +{{- end }} + {{- if .Values.controller.unprivileged }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .port }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ .port }} + {{- end }} + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + startupProbe: + failureThreshold: {{ .Values.controller.startupProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.startupProbe.path }} + port: {{ .Values.controller.startupProbe.port 
}} + scheme: {{ .Values.controller.startupProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.startupProbe.periodSeconds }} + successThreshold: {{ .Values.controller.startupProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.startupProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range .Values.controller.extraEnvs }} + - name: "{{ .name }}" + value: "{{ .value }}" + {{- end }} + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- if .Values.controller.lifecycle }} + lifecycle: + {{- if eq "string" (printf "%T" .Values.controller.lifecycle) }} +{{ tpl .Values.controller.lifecycle . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + volumeMounts: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumeMounts) }} +{{ tpl .Values.controller.extraVolumeMounts . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12 }} + {{- end }} + {{- end}} + {{- if .Values.controller.extraContainers }} + {{- if eq "string" (printf "%T" .Values.controller.extraContainers) }} +{{ tpl .Values.controller.extraContainers . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraContainers | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + volumes: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumes) }} +{{ tpl .Values.controller.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if or .Values.controller.unprivileged .Values.controller.initContainers }} + initContainers: + {{- if .Values.controller.unprivileged }} + - name: sysctl + image: busybox:musl + command: + - /bin/sh + - -c + - sysctl -w net.ipv4.ip_unprivileged_port_start=0 + securityContext: + privileged: true + {{- end }} + {{- with.Values.controller.initContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-defaultcertsecret.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-defaultcertsecret.yaml new file mode 100644 index 000000000..b409c7b25 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-defaultcertsecret.yaml @@ -0,0 +1,35 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if .Values.controller.defaultTLSSecret.enabled }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +data: +{{ ( include "kubernetes-ingress.gen-certs" . ) | indent 2 }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-deployment.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-deployment.yaml new file mode 100644 index 000000000..53568e1d9 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-deployment.yaml @@ -0,0 +1,229 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "Deployment" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.extraLabels }} +{{ toYaml .Values.controller.extraLabels | indent 4 }} + {{- end }} +spec: + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.controller.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} +{{- with .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . 
| nindent 8 }} +{{- end }} +{{- if .Values.controller.dnsConfig }} + dnsConfig: +{{ toYaml .Values.controller.dnsConfig | indent 8 }} +{{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . }} +{{- else if .Values.controller.existingImagePullSecret }} + imagePullSecrets: + - name: {{ .Values.controller.existingImagePullSecret }} +{{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} +{{- if .Values.defaultBackend.enabled }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end }} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . 
}} +{{- end }} + {{- if .Values.controller.unprivileged }} + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .targetPort }} + protocol: TCP + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + startupProbe: + failureThreshold: {{ .Values.controller.startupProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.startupProbe.path }} + port: {{ .Values.controller.startupProbe.port }} + scheme: {{ .Values.controller.startupProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.startupProbe.periodSeconds }} + successThreshold: {{ .Values.controller.startupProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.startupProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range .Values.controller.extraEnvs }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- if .Values.controller.lifecycle }} + lifecycle: + {{- if eq "string" (printf "%T" .Values.controller.lifecycle) }} +{{ tpl .Values.controller.lifecycle . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.lifecycle | indent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + volumeMounts: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumeMounts) }} +{{ tpl .Values.controller.extraVolumeMounts . | indent 12 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumeMounts | indent 12 }} + {{- end }} + {{- end}} + {{- if .Values.controller.extraContainers }} + {{- if eq "string" (printf "%T" .Values.controller.extraContainers) }} +{{ tpl .Values.controller.extraContainers . | indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraContainers | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.controller.extraVolumes }} + volumes: + {{- if eq "string" (printf "%T" .Values.controller.extraVolumes) }} +{{ tpl .Values.controller.extraVolumes . 
| indent 8 }} + {{- else }} +{{ toYaml .Values.controller.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if or .Values.controller.unprivileged .Values.controller.initContainers }} + initContainers: + {{- if .Values.controller.unprivileged }} + - name: sysctl + image: busybox:musl + command: + - /bin/sh + - -c + - sysctl -w net.ipv4.ip_unprivileged_port_start=0 + securityContext: + privileged: true + {{- end }} + {{- with.Values.controller.initContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-hpa.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-hpa.yaml new file mode 100644 index 000000000..102b23439 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-hpa.yaml @@ -0,0 +1,49 @@ +{{/* +Copyright 2020 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and (eq .Values.controller.kind "Deployment") .Values.controller.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "kubernetes-ingress.fullname" . 
}} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- if .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-poddisruptionbudget.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 000000000..e08d25cc7 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,40 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.PodDisruptionBudget.enable }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + {{- if .Values.controller.PodDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.controller.PodDisruptionBudget.maxUnavailable }} + {{- end }} + {{- if .Values.controller.PodDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.controller.PodDisruptionBudget.minAvailable }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-podsecuritypolicy.yaml new file mode 100644 index 000000000..7851e2acf --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-podsecuritypolicy.yaml @@ -0,0 +1,80 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.fullname" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 +{{- if $useHostNetwork }} + hostNetwork: true +{{- end }} +{{- if or $useHostPort $useHostNetwork }} + hostPorts: +{{- range $key, $value := .Values.controller.containerPort }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- range .Values.controller.service.tcpPorts }} + - min: {{ .port }} + max: {{ .port }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-pullsecret.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-pullsecret.yaml new file mode 100644 index 000000000..88252394c --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-pullsecret.yaml @@ -0,0 +1,32 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.imageCredentials.registry }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "kubernetes-ingress.imagePullSecret" . 
}} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-role.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-role.yaml new file mode 100644 index 000000000..3e41df6e4 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-rolebinding.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-rolebinding.yaml new file mode 100644 index 000000000..40404a401 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-service.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-service.yaml new file mode 100644 index 000000000..eb2eea381 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + annotations: +{{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} +{{- end }} +spec: + {{ with .Values.controller.service.clusterIP }}clusterIP: {{ . }}{{ end }} + type: {{ .Values.controller.service.type }} + {{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} + {{- end }} + ports: + {{- if .Values.controller.service.enablePorts.http }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if .Values.controller.service.nodePorts.http }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.https }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if .Values.controller.service.nodePorts.https }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.stat }} + - name: stat + port: {{ .Values.controller.service.ports.stat }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.stat }} + {{- if .Values.controller.service.nodePorts.stat }} + nodePort: {{ .Values.controller.service.nodePorts.stat }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + port: {{ .port }} + protocol: TCP + targetPort: {{ .targetPort }} + {{- if .nodePort }} + nodePort: {{ .nodePort }} + {{- end }} + {{- end }} + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} + {{- end }} + externalIPs: +{{- if .Values.controller.service.externalIPs }} +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end -}} +{{- if (eq .Values.controller.service.type "LoadBalancer") }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-serviceaccount.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-serviceaccount.yaml new file mode 100644 index 000000000..c90710990 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/controller-servicemonitor.yaml b/charts/haproxy/haproxy/1.12.500/templates/controller-servicemonitor.yaml new file mode 100644 index 000000000..0f4c2c3af --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/controller-servicemonitor.yaml @@ -0,0 +1,41 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kubernetes-ingress.serviceMonitorName" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + {{- if .Values.controller.serviceMonitor.extraLabels }} + {{ toYaml .Values.controller.serviceMonitor.extraLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + {{ .Values.controller.serviceMonitor.endpoints | toYaml | nindent 4 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-deployment.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-deployment.yaml new file mode 100644 index 000000000..af9930dc3 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-deployment.yaml @@ -0,0 +1,88 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.defaultBackend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + {{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podAnnotations }} + annotations: +{{ toYaml .Values.defaultBackend.podAnnotations | indent 8 }} + {{- end }} + spec: +{{- with .Values.defaultBackend.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} +{{- end }} +{{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . 
}}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.containerPort }} + protocol: TCP + {{- if .Values.defaultBackend.extraEnvs }} + env: + {{- range .Values.defaultBackend.extraEnvs }} + - name: "{{ .name }}" + value: "{{ .value }}" + {{- end }} + {{- end }} + resources: + {{- toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- with .Values.defaultBackend.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.defaultBackend.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + terminationGracePeriodSeconds: 60 + {{- with .Values.defaultBackend.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-hpa.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-hpa.yaml new file mode 100644 index 000000000..0fd8a65b7 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-hpa.yaml @@ -0,0 +1,49 @@ +{{/* +Copyright 2020 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.defaultBackend.autoscaling.enabled .Values.defaultBackend.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . 
}} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: + {{- if .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-podsecuritypolicy.yaml new file mode 100644 index 000000000..82397b57b --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-podsecuritypolicy.yaml @@ -0,0 +1,64 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + hostNetwork: false + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - max: 65535 + min: 1 + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-role.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-role.yaml new file mode 100644 index 000000000..8475d04fc --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-rolebinding.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-rolebinding.yaml new file mode 100644 index 000000000..3a94e9418 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-service.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-service.yaml new file mode 100644 index 000000000..6e0cf0e98 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-service.yaml @@ -0,0 +1,40 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.defaultBackend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.defaultBackend.service.port }} + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.12.500/templates/default-backend-serviceaccount.yaml b/charts/haproxy/haproxy/1.12.500/templates/default-backend-serviceaccount.yaml new file mode 100644 index 000000000..3c0853b14 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.12.500/values.yaml b/charts/haproxy/haproxy/1.12.500/values.yaml new file mode 100644 index 000000000..98b3e74a9 --- /dev/null +++ b/charts/haproxy/haproxy/1.12.500/values.yaml @@ -0,0 +1,162 @@ +controller: + PodDisruptionBudget: + enable: false + affinity: {} + autoscaling: + enabled: false + maxReplicas: 20 + minReplicas: 2 + targetCPUUtilizationPercentage: 80 + config: {} + containerPort: + http: 80 + https: 443 + stat: 1024 + daemonset: + hostPorts: + http: 80 + https: 443 + stat: 1024 + useHostNetwork: false + useHostPort: false + defaultTLSSecret: + enabled: true + secret: null + dnsConfig: {} + dnsPolicy: ClusterFirst + existingImagePullSecret: null + extraArgs: [] + extraContainers: [] + extraEnvs: [] + extraLabels: {} + extraVolumeMounts: [] + extraVolumes: [] + image: + pullPolicy: IfNotPresent + repository: haproxytech/kubernetes-ingress + tag: '{{ .Chart.AppVersion }}' + imageCredentials: + password: null + registry: null + username: null + ingressClass: null + initContainers: [] + kind: Deployment + lifecycle: {} + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + logging: + level: info + traffic: {} + name: controller + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + publishService: + enabled: false + pathOverride: "" + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + replicaCount: 2 + resources: + requests: + cpu: 100m + memory: 64Mi + service: + annotations: {} + enablePorts: + http: true + https: true + stat: true + enabled: true + externalIPs: [] + healthCheckNodePort: 0 + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: {} + ports: + http: 80 + https: 443 + stat: 1024 + targetPorts: + http: http + https: https + stat: stat + tcpPorts: [] + type: NodePort + serviceMonitor: + enabled: false + endpoints: + - path: /metrics + port: stat + scheme: http + extraLabels: {} + startupProbe: + failureThreshold: 20 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 1 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + strategy: {} + terminationGracePeriodSeconds: 60 + tolerations: [] + topologySpreadConstraints: [] + unprivileged: false +defaultBackend: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 2 + minReplicas: 1 + targetCPUUtilizationPercentage: 80 + containerPort: 8080 + enabled: true + extraEnvs: [] + image: + pullPolicy: IfNotPresent + repository: k8s.gcr.io/defaultbackend-amd64 + runAsUser: 65534 + tag: 1.5 + name: default-backend + nodeSelector: {} + podAnnotations: {} + podLabels: {} + priorityClassName: "" + replicaCount: 2 + resources: + requests: + cpu: 10m + memory: 16Mi + service: + port: 8080 + serviceAccount: + create: true + tolerations: [] + topologySpreadConstraints: [] +podSecurityPolicy: + annotations: {} + enabled: false +rbac: + create: true +serviceAccount: + create: true + name: null diff --git a/charts/haproxy/haproxy/1.4.300/.helmignore b/charts/haproxy/haproxy/1.4.300/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/.helmignore @@ -0,0 +1,21 
@@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/haproxy/haproxy/1.4.300/Chart.yaml b/charts/haproxy/haproxy/1.4.300/Chart.yaml new file mode 100644 index 000000000..82f44ba5e --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy +apiVersion: v1 +appVersion: 1.4.6 +description: A Helm chart for HAProxy Kubernetes Ingress Controller +home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress +icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png +keywords: +- ingress +- haproxy +kubeVersion: '>=1.12.0-0' +maintainers: +- email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi +- email: bassmann@haproxy.com + name: Baptiste Assmann +- email: dkorunic@haproxy.com + name: Dinko Korunic +name: haproxy +sources: +- https://github.com/haproxytech/kubernetes-ingress +version: 1.4.300 diff --git a/charts/haproxy/haproxy/1.4.300/README.md b/charts/haproxy/haproxy/1.4.300/README.md new file mode 100644 index 000000000..73e4e2fcb --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/README.md @@ -0,0 +1,190 @@ +# ![HAProxy](https://github.com/haproxytech/kubernetes-ingress/raw/master/assets/images/haproxy-weblogo-210x49.png "HAProxy") + +## HAProxy Kubernetes Ingress Controller + +An ingress controller is a Kubernetes resource that routes traffic from outside your cluster to services within the cluster. HAProxy Kubernetes Ingress Controller uses ConfigMap to store the haproxy configuration. + +Detailed documentation can be found within the [Official Documentation](https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/). + +Additional configuration details can be found in [annotation reference](https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation) and in image [arguments reference](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md). + +## Introduction + +This chart bootstraps an HAProxy kubernetes-ingress deployment/daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +### Prerequisites + + - Kubernetes 1.12+ + - Helm 2.9+ + +## Before you begin + +### Setup a Kubernetes Cluster + +The quickest way to setup a Kubernetes cluster is with [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/), [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/) or [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) using their respective quick-start guides. + +For setting up Kubernetes on other cloud platforms or bare-metal servers refer to the Kubernetes [getting started guide](http://kubernetes.io/docs/getting-started-guides/). + +### Install Helm + +Get the latest [Helm release](https://github.com/helm/helm#install). 
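For example, a quick way to confirm that the prerequisites listed above are met (assuming `kubectl` and `helm` are already on your PATH; exact flag support varies by client version):

```console
# Sketch: check client and cluster versions against the chart prerequisites
kubectl version --short   # expect a Kubernetes 1.12+ cluster
helm version --short      # expect Helm 2.9+ (any Helm 3 release also works)
```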
+ +### Add Helm chart repo + +Once you have Helm installed, add the repo as follows: + +```console +helm repo add haproxytech https://haproxytech.github.io/helm-charts +helm repo update +``` + +## Install the chart + +To install the chart with Helm v3 as the *my-release* deployment: + +```console +helm install my-release haproxytech/kubernetes-ingress +``` + +***NOTE***: To install the chart with Helm v2 (legacy Helm), the syntax requires passing the deployment name with the `--name` parameter: + +```console +helm install haproxytech/kubernetes-ingress \ + --name my-release +``` + +### Installing with unique name + +To auto-generate the controller and resource names at install time, use the following: + +```console +helm install haproxytech/kubernetes-ingress \ + --generate-name +``` + +### Installing from a private registry + +To install the chart using a private registry for the controller into a separate namespace *prod*: + +***NOTE***: Helm v3 requires the namespace to be pre-created (e.g. with ```kubectl create namespace prod```) + +```console +helm install my-ingress haproxytech/kubernetes-ingress \ + --namespace prod \ + --set controller.image.tag=SOMETAG \ + --set controller.imageCredentials.registry=myregistry.domain.com \ + --set controller.imageCredentials.username=MYUSERNAME \ + --set controller.imageCredentials.password=MYPASSWORD +``` + +### Installing as DaemonSet + +The default controller mode is [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), but it is possible to use [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) as well: + +```console +helm install my-ingress2 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet +``` + +### Installing in multi-ingress environment + +It is also possible to set the controller ingress class to be used in [multi-ingress environments](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/#using-multiple-ingress-controllers): + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy +``` + +***NOTE***: make sure your Ingress routes have the corresponding `ingress.class: haproxy` annotation. + +### Installing with service annotations + +In some environments, such as EKS and GKE, you may need to pass service annotations. The syntax can become a little tedious: + +```console +helm install my-ingress3 haproxytech/kubernetes-ingress \ + --set controller.kind=DaemonSet \ + --set controller.ingressClass=haproxy \ + --set controller.service.type=LoadBalancer \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-internal"="0.0.0.0/0" \ + --set controller.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled"="true" +``` + +***NOTE***: With helm `--set` you need to quote the annotation keys and values, escaping the dots in the keys and the commas in the value strings. + +### Installing with Horizontal Pod Autoscaler + +[HPA](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) automatically scales the number of replicas in a Deployment or Replication Controller.
Therefore we want to unset the default replicaCount for the controller and defaultBackend by setting the corresponding keys to null: + +```console +helm install my-ingress4 haproxytech/kubernetes-ingress \ + --set controller.replicaCount=null \ + --set defaultBackend.replicaCount=null +``` + +### Using values from YAML file + +As opposed to using many `--set` invocations, a much simpler approach is to define value overrides in a separate YAML file and pass it when invoking Helm: + +*mylb.yaml*: + +```yaml +controller: + kind: DaemonSet + ingressClass: haproxy + service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 +``` + +Invoking Helm then becomes (compare to the previous example): + +```console +helm install my-ingress4 -f mylb.yaml haproxytech/kubernetes-ingress +``` + +A typical YAML file for TCP services looks like this (provided that the "[default/tcp](https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md)" ConfigMap was created): + +```yaml +controller: + service: + tcpPorts: + - name: mysql + port: 3306 + targetPort: 3306 + extraArgs: + - --configmap-tcp-services=default/tcp +``` + +## Upgrading the chart + +To upgrade the *my-release* deployment: + +```console +helm upgrade my-release haproxytech/kubernetes-ingress +``` + +## Uninstalling the chart + +To uninstall/delete the *my-release* deployment: + +```console +helm delete my-release +``` + +## Debugging + +It is possible to generate a set of YAML files for testing/debugging: + +```console +helm install my-release haproxytech/kubernetes-ingress \ + --debug \ + --dry-run +``` + +## Contributing + +We welcome all contributions. Please refer to the [guidelines](../CONTRIBUTING.md) on how to make a contribution. diff --git a/charts/haproxy/haproxy/1.4.300/app-readme.md b/charts/haproxy/haproxy/1.4.300/app-readme.md new file mode 100644 index 000000000..aae3d1bd8 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/app-readme.md @@ -0,0 +1,8 @@ +# HAProxy +[HAProxy](https://www.haproxy.org/) is the world's fastest and most widely used software load balancer. HAProxy allows organizations to deliver websites and applications with the utmost performance, observability, and security at any scale and in any environment. + +# HAProxy Enterprise +[HAProxy Enterprise](https://www.haproxy.com/products/haproxy-enterprise-edition/) is an enterprise-class version of HAProxy providing a robust and reliable code base with cutting-edge features, an enterprise suite of add-ons, expert support, and professional services. At its core, it incorporates feature backports from the HAProxy development branch for customers who require immediate access to the latest functionality in a hardened version of code. + +## Introduction +This chart bootstraps the [HAProxy Ingress Controller](https://github.com/haproxytech/kubernetes-ingress) or the [HAProxy Enterprise Ingress Controller](https://www.haproxy.com/products/haproxy-enterprise-kubernetes-ingress-controller/) using the [Helm](https://helm.sh) package manager.
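The `ci/` values files added below are small overrides that exercise individual chart options (DaemonSet mode, custom NodePorts, disabled default TLS secret, and so on). As a sketch, any of them can be rendered locally from a checkout of this repository to inspect the manifests the chart would generate (assumes Helm 3; paths as they appear in this diff):

```console
# Sketch: render the 1.4.300 chart against one of the CI values files that follow
helm template ci-test ./charts/haproxy/haproxy/1.4.300 \
  -f ./charts/haproxy/haproxy/1.4.300/ci/daemonset-customconfig-values.yaml
```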
diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-customconfig-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-customconfig-values.yaml new file mode 100644 index 000000000..116158a14 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-customnodeport-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 000000000..c9de04c16 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-default-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-default-values.yaml new file mode 100644 index 000000000..ddb25623a --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-default-values.yaml @@ -0,0 +1,2 @@ +controller: + kind: DaemonSet diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..362fbb982 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-disabledsecretconfig-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + defaultTLSSecret: + enabled: false diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-enableports-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-enableports-values.yaml new file mode 100644 index 000000000..9a41dac52 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-enableports-values.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-extraargs-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-extraargs-values.yaml new file mode 100644 index 000000000..691acbc44 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-extraargs-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-hostport-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-hostport-values.yaml new file mode 100644 index 000000000..45042ea50 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-hostport-values.yaml @@ -0,0 +1,8 @@ +controller: + kind: DaemonSet + daemonset: + useHostPort: true + hostPorts: + http: 80 + https: 443 + stat: 1024 diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-nodeport-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-nodeport-values.yaml new file mode 100644 index 000000000..ebc8f1020 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-publishservice-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/daemonset-publishservice-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-publishservice-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.4.300/ci/daemonset-serviceannotation-values.yaml 
b/charts/haproxy/haproxy/1.4.300/ci/daemonset-serviceannotation-values.yaml new file mode 100644 index 000000000..b538cb542 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/daemonset-serviceannotation-values.yaml @@ -0,0 +1,5 @@ +controller: + kind: DaemonSet + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-customconfig-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-customconfig-values.yaml new file mode 100644 index 000000000..12c48d22d --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-customconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + config: + rate-limit: "ON" diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-customnodeport-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-customnodeport-values.yaml new file mode 100644 index 000000000..f044362aa --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: NodePort + ports: + 8000: 10000 + 8001: 10001 diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-default-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-default-values.yaml new file mode 100644 index 000000000..792d60054 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-default-values.yaml @@ -0,0 +1 @@ +# diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-disabledsecretconfig-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-disabledsecretconfig-values.yaml new file mode 100644 index 000000000..767645997 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-disabledsecretconfig-values.yaml @@ -0,0 +1,3 @@ +controller: + defaultTLSSecret: + enabled: false diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-enableports-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-enableports-values.yaml new file mode 100644 index 000000000..03ff297b4 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-enableports-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + enablePorts: + http: false + https: true + stat: false diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-extraargs-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-extraargs-values.yaml new file mode 100644 index 000000000..d0e1dbe73 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-extraargs-values.yaml @@ -0,0 +1,3 @@ +controller: + extraArgs: + - --namespace-whitelist=default diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-nodeport-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-nodeport-values.yaml new file mode 100644 index 000000000..ffdc47b2d --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-nodeport-values.yaml @@ -0,0 +1,3 @@ +controller: + service: + type: NodePort diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-publishservice-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-publishservice-values.yaml new file mode 100644 index 000000000..6d8bf9bf7 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-publishservice-values.yaml @@ -0,0 +1,4 @@ +controller: + kind: DaemonSet + publishService: + enabled: true diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-replicacount-unset.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-replicacount-unset.yaml new file mode 100644 index 000000000..78ee30060 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-replicacount-unset.yaml @@ -0,0 +1,5 @@ 
+controller: + replicaCount: null + +defaultBackend: + replicaCount: null diff --git a/charts/haproxy/haproxy/1.4.300/ci/deployment-serviceannotation-values.yaml b/charts/haproxy/haproxy/1.4.300/ci/deployment-serviceannotation-values.yaml new file mode 100644 index 000000000..c103bd6c9 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/ci/deployment-serviceannotation-values.yaml @@ -0,0 +1,4 @@ +controller: + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 diff --git a/charts/haproxy/haproxy/1.4.300/questions.yml b/charts/haproxy/haproxy/1.4.300/questions.yml new file mode 100644 index 000000000..27ff89e72 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/questions.yml @@ -0,0 +1,73 @@ +questions: +- variable: imageDefault + default: true + description: "Use default Docker image" + label: Use Default Image + type: boolean + group: "Settings" + show_subquestion_if: false + subquestions: + - variable: controller.image.tag + default: "1.4.6" + description: "HAProxy Ingress Controller Tag" + type: string + label: HAProxy Ingress Controller Tag +- variable: controller.kind + type: enum + options: + - "DaemonSet" + - "Deployment" + default: "Deployment" + description: "Deployment Type" + label: Deployment Type + group: "Settings" +- variable: controller.service.type + type: enum + options: + - "LoadBalancer" + - "NodePort" + default: "NodePort" + description: "Service Type for HAProxy Ingress Controller" + label: Service Type + group: "Settings" +- variable: controller.ingressClass + default: "" + description: "Ingress Class for targeting this controller" + label: Ingress Class + type: string + group: "Settings" +- variable: controller.defaultTLSSecret.secret + default: "" + description: "Default TLS certificate secret" + label: TLS Certificate Secret + type: string + group: "Settings" +- variable: enableEnterprise + default: false + description: "Use HAProxy Enterprise" + label: Enable + type: boolean + group: "HAProxy Enterprise" + show_subquestion_if: true + subquestions: + - variable: controller.imageCredentials.registry + type: string + default: "kubernetes-registry.haproxy.com" + description: "HAProxy Enterprise Registry" + label: Registry + - variable: controller.image.repository + type: string + default: "kubernetes-registry.haproxy.com/hapee-ingress" + description: "HAProxy Enterprise Repository" + label: Repository + - variable: controller.imageCredentials.username + type: string + default: "MYUSERNAME" + description: "HAProxy Enterprise Username" + label: Username + - variable: controller.imageCredentials.password + type: string + default: "MYPASSWORD" + description: "HAProxy Enterprise Password" + label: Password + diff --git a/charts/haproxy/haproxy/1.4.300/templates/NOTES.txt b/charts/haproxy/haproxy/1.4.300/templates/NOTES.txt new file mode 100644 index 000000000..522e23017 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/NOTES.txt @@ -0,0 +1,67 @@ +HAProxy Kubernetes Ingress Controller has been successfully installed. + +Controller image deployed is: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}". +Your controller is of a "{{ .Values.controller.kind }}" kind. Your controller service is running as a "{{ .Values.controller.service.type }}" type. +{{- if and .Values.rbac.create}} +RBAC authorization is enabled. +{{- else}} +RBAC authorization is disabled.
+{{- end}} +{{- if .Values.controller.ingressClass}} +Controller ingress.class is set to "{{ .Values.controller.ingressClass }}" so make sure to use same annotation for +Ingress resource. +{{- end}} + +Service ports mapped are: +{{- if eq .Values.controller.kind "Deployment" }} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +{{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + hostPort: {{ index $hostPorts $key | default $value }} +{{- end }} +{{- end }} + +Node IP can be found with: + $ kubectl --namespace {{ .Release.Namespace }} get nodes -o jsonpath="{.items[0].status.addresses[1].address}" + +The following ingress resource routes traffic to pods that match the following: + * service name: web + * client's Host header: webdemo.com + * path begins with / + + --- + apiVersion: networking.k8s.io/v1beta1 + kind: Ingress + metadata: + name: web-ingress + namespace: default + spec: + rules: + - host: webdemo.com + http: + paths: + - path: / + backend: + serviceName: web + servicePort: 80 + +In case that you are using multi-ingress controller environment, make sure to use ingress.class annotation and match it +with helm chart option controller.ingressClass. + +For more examples and up to date documentation, please visit: + * Helm chart documentation: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + * Controller documentation: https://www.haproxy.com/documentation/hapee/2-0r1/traffic-management/kubernetes-ingress-controller/ + * Annotation reference: https://github.com/haproxytech/kubernetes-ingress/tree/master/documentation + * Image parameters reference: https://github.com/haproxytech/kubernetes-ingress/blob/master/documentation/controller.md + + + diff --git a/charts/haproxy/haproxy/1.4.300/templates/_helpers.tpl b/charts/haproxy/haproxy/1.4.300/templates/_helpers.tpl new file mode 100644 index 000000000..23a9063ef --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/_helpers.tpl @@ -0,0 +1,123 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kubernetes-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kubernetes-ingress.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubernetes-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Encode an imagePullSecret string. +*/}} +{{- define "kubernetes-ingress.imagePullSecret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.controller.imageCredentials.registry (printf "%s:%s" .Values.controller.imageCredentials.username .Values.controller.imageCredentials.password | b64enc) | b64enc }} +{{- end }} + +{{/* +Generate default certificate for HAProxy. +*/}} +{{- define "kubernetes-ingress.gen-certs" -}} +{{- $ca := genCA "kubernetes-ingress-ca" 365 -}} +{{- $cn := printf "%s.%s" .Release.Name .Release.Namespace -}} +{{- $cert := genSignedCert $cn nil nil 365 $ca -}} +tls.crt: {{ $cert.Cert | b64enc }} +tls.key: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Create the name of the controller service account to use. +*/}} +{{- define "kubernetes-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kubernetes-ingress.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "kubernetes-ingress.defaultBackend.serviceAccountName" -}} +{{- if or .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +*/}} +{{- define "kubernetes-ingress.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default cert secret name. +*/}} +{{- define "kubernetes-ingress.defaultTLSSecret.fullname" -}} +{{- printf "%s-%s" (include "kubernetes-ingress.fullname" .) "default-cert" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. +By default this will use the `<namespace>/<service name>` matching the controller's service name. +Users can provide an override for an explicit service they want to use via `.Values.controller.publishService.pathOverride` +*/}} +{{- define "kubernetes-ingress.publishServicePath" -}} +{{- $defServicePath := printf "%s/%s" .Release.Namespace (include "kubernetes-ingress.fullname" .)
-}} +{{- $servicePath := default $defServicePath .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the syslog-server annotation +*/}} +{{- define "kubernetes-ingress.syslogServer" -}} +{{- range $key, $val := .Values.controller.logging.traffic -}} +{{- printf "%s:%s, " $key $val }} +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/clusterrole.yaml b/charts/haproxy/haproxy/1.4.300/templates/clusterrole.yaml new file mode 100644 index 000000000..89cb9f829 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/clusterrole.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - services + - namespaces + - events + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + resources: + - ingresses + - ingresses/status + verbs: + - get + - list + - watch + - update +- apiGroups: + - "networking.k8s.io/v1beta1" + resources: + - ingresses + - ingresses/status + verbs: + - get + - list + - watch +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/clusterrolebinding.yaml b/charts/haproxy/haproxy/1.4.300/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..cfd226083 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/clusterrolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} + diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-configmap.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-configmap.yaml new file mode 100644 index 000000000..94aa9c554 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-configmap.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +data: +{{- if .Values.controller.logging.traffic }} + syslog-server: {{ template "kubernetes-ingress.syslogServer" . }} +{{- end }} +{{- if .Values.controller.config }} +{{ toYaml .Values.controller.config | indent 2 }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-daemonset.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-daemonset.yaml new file mode 100644 index 000000000..7260d3227 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-daemonset.yaml @@ -0,0 +1,153 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "DaemonSet" }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork -}} +{{- $useHostPort := .Values.controller.daemonset.useHostPort -}} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + minReadySeconds: 0 + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} + {{- if $useHostNetwork }} + hostNetwork: true + {{- end }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if and .Values.controller.defaultTLSSecret.enabled -}} +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . 
}} +{{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ index $hostPorts $key | default $value }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .port }} + protocol: TCP + {{- if $useHostPort }} + hostPort: {{ .port }} + {{- end }} + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- with.Values.controller.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-defaultcertsecret.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-defaultcertsecret.yaml new file mode 100644 index 000000000..bb97b1e05 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-defaultcertsecret.yaml @@ -0,0 +1,33 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation" +data: +{{ ( include "kubernetes-ingress.gen-certs" . ) | indent 2 }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-deployment.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-deployment.yaml new file mode 100644 index 000000000..7fd2aae2e --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-deployment.yaml @@ -0,0 +1,141 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if eq .Values.controller.kind "Deployment" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + {{- if not ( kindIs "invalid" .Values.controller.replicaCount) }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.controller.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.podLabels }} +{{ toYaml .Values.controller.podLabels | indent 8 }} + {{- end }} + {{- if .Values.controller.podAnnotations }} + annotations: +{{ toYaml .Values.controller.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kubernetes-ingress.serviceAccountName" . }} +{{- if .Values.controller.imageCredentials.registry }} + imagePullSecrets: + - name: {{ template "kubernetes-ingress.fullname" . }} +{{- end }} + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.controller.name }} + image: "{{ .Values.controller.image.repository }}:{{ tpl .Values.controller.image.tag . }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + args: +{{- if .Values.controller.defaultTLSSecret.secret }} + - --default-ssl-certificate={{ .Values.controller.defaultTLSSecret.secret }} +{{- else }} + - --default-ssl-certificate={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultTLSSecret.fullname" . }} +{{- end }} + - --configmap={{ .Release.Namespace }}/{{ template "kubernetes-ingress.fullname" . }} + - --default-backend-service={{ .Release.Namespace }}/{{ template "kubernetes-ingress.defaultBackend.fullname" . 
}} +{{- if .Values.controller.ingressClass }} + - --ingress.class={{ .Values.controller.ingressClass }} +{{- end }} +{{- if .Values.controller.publishService.enabled }} + - --publish-service={{ template "kubernetes-ingress.publishServicePath" . }} +{{- end }} +{{- if .Values.controller.logging.level }} + - --log={{ .Values.controller.logging.level }} +{{- end }} +{{- range .Values.controller.extraArgs }} + - {{ . }} +{{- end }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + containerPort: {{ .port }} + protocol: TCP + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.controller.livenessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.livenessProbe.path }} + port: {{ .Values.controller.livenessProbe.port }} + scheme: {{ .Values.controller.livenessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.controller.readinessProbe.failureThreshold }} + httpGet: + path: {{ .Values.controller.readinessProbe.path }} + port: {{ .Values.controller.readinessProbe.port }} + scheme: {{ .Values.controller.readinessProbe.scheme }} + initialDelaySeconds: {{ .Values.controller.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.controller.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.controller.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.controller.readinessProbe.timeoutSeconds }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + {{- with.Values.controller.initContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-podsecuritypolicy.yaml new file mode 100644 index 000000000..77d220f3a --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-podsecuritypolicy.yaml @@ -0,0 +1,75 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +{{- $useHostNetwork := .Values.controller.daemonset.useHostNetwork }} +{{- $useHostPort := .Values.controller.daemonset.useHostPort }} +{{- $hostPorts := .Values.controller.daemonset.hostPorts -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.fullname" . }} +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs +{{- if $useHostNetwork }} + hostNetwork: true +{{- end }} +{{- if or $useHostPort $useHostNetwork }} + hostPorts: +{{- range $key, $value := .Values.controller.containerPort }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- range .Values.controller.service.tcpPorts }} + - min: {{ .port }} + max: {{ .port }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-pullsecret.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-pullsecret.yaml new file mode 100644 index 000000000..88252394c --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-pullsecret.yaml @@ -0,0 +1,32 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.imageCredentials.registry }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "kubernetes-ingress.imagePullSecret" . 
}} +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-role.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-role.yaml new file mode 100644 index 000000000..3e41df6e4 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-rolebinding.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-rolebinding.yaml new file mode 100644 index 000000000..40404a401 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-service.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-service.yaml new file mode 100644 index 000000000..2fa164137 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-service.yaml @@ -0,0 +1,100 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- if .Values.controller.service.labels }} +{{ toYaml .Values.controller.service.labels | indent 4 }} +{{- end }} + annotations: +{{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} +{{- end }} +spec: + {{ with .Values.controller.service.clusterIP }}clusterIP: {{ . }}{{ end }} + type: {{ .Values.controller.service.type }} + {{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} + {{- end }} + ports: + {{- if .Values.controller.service.enablePorts.http }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if .Values.controller.service.nodePorts.http }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.https }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if .Values.controller.service.nodePorts.https }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enablePorts.stat }} + - name: stat + port: {{ .Values.controller.service.ports.stat }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.stat }} + {{- if .Values.controller.service.nodePorts.stat }} + nodePort: {{ .Values.controller.service.nodePorts.stat }} + {{- end }} + {{- end }} + {{- range .Values.controller.service.tcpPorts }} + - name: {{ .name }}-tcp + port: {{ .port }} + protocol: TCP + targetPort: {{ .targetPort }} + {{- if .nodePort }} + nodePort: {{ .nodePort }} + {{- end }} + {{- end }} + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} + {{- end }} + externalIPs: +{{- if .Values.controller.service.externalIPs }} +{{ toYaml .Values.controller.service.externalIPs | indent 4 }} +{{- end -}} +{{- if (eq .Values.controller.service.type "LoadBalancer") }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.controller.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.controller.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} +{{- end }} + diff --git a/charts/haproxy/haproxy/1.4.300/templates/controller-serviceaccount.yaml b/charts/haproxy/haproxy/1.4.300/templates/controller-serviceaccount.yaml new file mode 100644 index 000000000..c90710990 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/controller-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-deployment.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-deployment.yaml new file mode 100644 index 000000000..3dd04e012 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-deployment.yaml @@ -0,0 +1,71 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + {{- if not (kindIs "invalid" .Values.defaultBackend.replicaCount) }} + replicas: {{ .Values.defaultBackend.replicaCount }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.defaultBackend.podLabels }} +{{ toYaml .Values.defaultBackend.podLabels | indent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podAnnotations }} + annotations: +{{ toYaml .Values.defaultBackend.podAnnotations | indent 8 }} + {{- end }} + spec: + containers: + - name: {{ template "kubernetes-ingress.name" . }}-{{ .Values.defaultBackend.name }} + image: "{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}" + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.containerPort }} + protocol: TCP + resources: + {{- toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- with .Values.defaultBackend.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.defaultBackend.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + {{- with .Values.defaultBackend.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-podsecuritypolicy.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-podsecuritypolicy.yaml new file mode 100644 index 000000000..a31d60e8f --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-podsecuritypolicy.yaml @@ -0,0 +1,59 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . 
}} +spec: + allowPrivilegeEscalation: false + allowedCapabilities: + - NET_BIND_SERVICE + defaultAllowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + hostNetwork: false + hostIPC: false + hostPID: false + privileged: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - downwardAPI + - secret +{{- end }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-role.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-role.yaml new file mode 100644 index 000000000..8ca2416ae --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-role.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +rules: +- apiGroups: + - "policy" + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-rolebinding.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-rolebinding.yaml new file mode 100644 index 000000000..a27f80465 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-rolebinding.yaml @@ -0,0 +1,37 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-service.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-service.yaml new file mode 100644 index 000000000..b3108ad61 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-service.yaml @@ -0,0 +1,38 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: http + port: {{ .Values.defaultBackend.service.port }} + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/name: {{ template "kubernetes-ingress.defaultBackend.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/haproxy/haproxy/1.4.300/templates/default-backend-serviceaccount.yaml b/charts/haproxy/haproxy/1.4.300/templates/default-backend-serviceaccount.yaml new file mode 100644 index 000000000..9a5e8169f --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,29 @@ +{{/* +Copyright 2019 HAProxy Technologies LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if and .Values.serviceAccount.create .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress.defaultBackend.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ template "kubernetes-ingress.name" . }} + helm.sh/chart: {{ template "kubernetes-ingress.chart" . 
}} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/haproxy/haproxy/1.4.300/values.yaml b/charts/haproxy/haproxy/1.4.300/values.yaml new file mode 100644 index 000000000..addf65446 --- /dev/null +++ b/charts/haproxy/haproxy/1.4.300/values.yaml @@ -0,0 +1,116 @@ +controller: + affinity: {} + config: {} + containerPort: + http: 80 + https: 443 + stat: 1024 + daemonset: + hostPorts: + http: 80 + https: 443 + stat: 1024 + useHostNetwork: false + useHostPort: false + defaultTLSSecret: + enabled: true + secret: null + extraArgs: [] + image: + pullPolicy: IfNotPresent + repository: haproxytech/kubernetes-ingress + tag: '{{ .Chart.AppVersion }}' + imageCredentials: + password: null + registry: null + username: null + ingressClass: null + initContainers: [] + kind: Deployment + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + logging: + level: info + traffic: {} + name: controller + nodeSelector: {} + podAnnotations: {} + podLabels: {} + publishService: + enabled: false + pathOverride: "" + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 0 + path: /healthz + periodSeconds: 10 + port: 1042 + scheme: HTTP + successThreshold: 1 + timeoutSeconds: 1 + replicaCount: 2 + resources: + requests: + cpu: 100m + memory: 64Mi + service: + annotations: {} + enablePorts: + http: true + https: true + stat: true + externalIPs: [] + healthCheckNodePort: 0 + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: {} + ports: + http: 80 + https: 443 + stat: 1024 + targetPorts: + http: http + https: https + stat: stat + tcpPorts: [] + type: NodePort + strategy: {} + tolerations: [] +defaultBackend: + affinity: {} + containerPort: 8080 + image: + pullPolicy: IfNotPresent + repository: k8s.gcr.io/defaultbackend-amd64 + runAsUser: 65534 + tag: 1.5 + name: default-backend + nodeSelector: {} + podAnnotations: {} + podLabels: {} + replicaCount: 2 + resources: + requests: + cpu: 10m + memory: 16Mi + service: + port: 8080 + serviceAccount: + create: true + tolerations: [] +podSecurityPolicy: + annotations: {} + enabled: false +rbac: + create: true +serviceAccount: + create: true + name: null diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/Chart.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/Chart.yaml new file mode 100644 index 000000000..cb4b7dd2f --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-csi-driver +apiVersion: v1 +appVersion: 1.3.0 +description: A Helm chart for installing the HPE CSI Driver for Kubernetes +home: https://hpe.com/storage/containers +icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png +keywords: +- HPE +- Storage +- StorageClass +- CentOS +- Ubuntu +- RHEL +maintainers: +- email: hpe-containers-dev@hpe.com + name: shivamerla +name: hpe-csi-driver +sources: +- https://scod.hpedev.io/csi_driver +version: 1.3.000 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/README.md b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/README.md new file mode 100644 index 000000000..4cdf3f49d --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/README.md @@ -0,0 +1,110 @@ +# HPE CSI Driver for Kubernetes Helm chart + 
+The [HPE CSI Driver for Kubernetes](https://scod.hpedev.io/csi_driver/index.html) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. + +## Prerequisites + +- Upstream Kubernetes version >= 1.15 +- Most Kubernetes distributions are supported +- Recent Ubuntu, SLES, CentOS or RHEL compute nodes connected to their respective official package repositories +- Helm 3 (Version >= 3.2.0 required) + +Depending on which [Container Storage Provider](https://scod.hpedev.io/container_storage_provider/index.html) (CSP) is being used, other prerequisites and requirements may apply, such as storage platform OS and features. + +- [HPE Nimble Storage](https://scod.hpedev.io/container_storage_provider/hpe_nimble_storage/index.html) +- [HPE 3PAR and Primera](https://scod.hpedev.io/container_storage_provider/hpe_3par_primera/index.html) + +## Configuration and installation + +The following table lists the configurable parameters of the HPE-CSI chart and their default values. + +| Parameter | Description | Default | +|---------------------------|------------------------------------------------------------------------|--------------| +| logLevel | Log level. Can be one of `info`, `debug`, `trace`, `warn` and `error`. | info | +| imagePullPolicy | Image pull policy (`Always`, `IfNotPresent`, `Never`). | IfNotPresent | +| disableNodeConformance | Disable automatic installation of iSCSI/Multipath Packages. | false | +| iscsi.chapUser | Username for iSCSI CHAP authentication. | "" | +| iscsi.chapPassword | Password for iSCSI CHAP authentication. | "" | + +It's recommended to create a [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver) file from the corresponding release of the chart and edit it to fit the environment the chart is being deployed to. Download and edit [a sample file](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). + +These are the bare minimum required parameters for a successful deployment to an iSCSI environment if CHAP authentication is required: + +``` +iscsi: + chapUser: + chapPassword: +``` + +Tweak any additional parameters to suit the environment or as prescribed by HPE. + +### Installing the chart + +To install the chart with the name `hpe-csi`: + +Add the HPE Helm repo: + +``` +helm repo add hpe https://hpe-storage.github.io/co-deployments +helm repo update +``` + +Install the latest chart: + +``` +helm install hpe-csi hpe/hpe-csi-driver --namespace kube-system -f myvalues.yaml +``` + +**Note**: values.yaml is optional if no parameters are overridden from defaults. + +### Upgrading the Chart + +To upgrade the chart, specify the version you want to upgrade to as shown below. Please do NOT re-use a full-blown `values.yaml` from a prior version when upgrading to a later version. Always use the `values.yaml` from the corresponding release at [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). + +List the available versions of the plugin: + +``` +helm repo update +helm search repo hpe-csi-driver -l +``` + +Select the target version and upgrade: + +``` +helm upgrade hpe-csi hpe/hpe-csi-driver --namespace kube-system --version=x.x.x.x -f myvalues.yaml +``` + +### Uninstalling the Chart + +To uninstall the `hpe-csi` chart: + +``` +helm uninstall hpe-csi --namespace kube-system +``` + +**Note**: Due to a limitation in Helm, CRDs are not deleted as part of the chart uninstall. 
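If a complete cleanup is wanted, the CRDs installed by this chart can be removed manually after the uninstall. A minimal cleanup sketch, assuming the three CRD names shipped in this chart's `crds/` directory (they appear later in this diff) and that no other HPE CSI Driver release on the cluster still uses them:

```
# Deleting a CRD also deletes any remaining custom resources of that type.
kubectl delete crd hpenodeinfos.storage.hpe.com
kubectl delete crd hpereplicationdeviceinfos.storage.hpe.com
kubectl delete crd hpevolumeinfos.storage.hpe.com
```

Skip this step if the chart will be reinstalled or upgraded later and the existing CRDs should be kept.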
+ +### Alternative install method + +In some cases it's more practical to provide the local configuration via the `helm` CLI directly. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. These will take precedence over entries in [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). For example: + +``` +helm install hpe-csi hpe/hpe-csi-driver --namespace kube-system --set iscsi.chapUser=admin \ +--set iscsi.chapPassword=xxxxxxxx +``` + +## Using persistent storage with Kubernetes + +Enable dynamic provisioning of persistent storage by creating a `StorageClass` API object that references a `Secret` which maps to a supported HPE primary storage backend. Refer to the [HPE CSI Driver for Kubernetes](https://scod.hpedev.io/csi_driver/using.html) documentation on the [HPE Storage Container Orchestration Documentation](https://scod.hpedev.io/) site. Also, it's helpful to be familiar with [persistent storage concepts](https://kubernetes.io/docs/concepts/storage/volumes/) in Kubernetes prior to deploying stateful workloads. + +## Support + +The HPE CSI Driver for Kubernetes Helm chart is covered by your HPE support contract. Please file any issues, questions or feature requests [here](https://github.com/hpe-storage/co-deployments/issues) or contact HPE through the regular support channels. You may also join our Slack community to chat with HPE folks close to this project. We hang out in `#NimbleStorage`, `#3par-primera` and `#Kubernetes` at [hpedev.slack.com](https://hpedev.slack.com); sign up here: [slack.hpedev.io](https://slack.hpedev.io/). + +## Contributing + +We value all feedback and contributions. If you find any issues or want to contribute, please feel free to open an issue or file a PR. More details are in [CONTRIBUTING.md](https://github.com/hpe-storage/co-deployments/blob/master/CONTRIBUTING.md). + +## License + +This is open source software licensed under the Apache License 2.0. Please see [LICENSE](https://github.com/hpe-storage/co-deployments/blob/master/LICENSE) for details. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/app-readme.md b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/app-readme.md new file mode 100644 index 000000000..29ca912cb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/app-readme.md @@ -0,0 +1,3 @@ +# HPE CSI Driver for Kubernetes + +The [HPE CSI Driver for Kubernetes](https://github.com/hpe-storage/csi-driver) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-nodeinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-nodeinfo-crd.yaml new file mode 100644 index 000000000..e63caf45c --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-nodeinfo-crd.yaml @@ -0,0 +1,54 @@ +--- +############################################# +############ HPE Node Info CRD ############ +############################################# +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpenodeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPENodeInfo + plural: hpenodeinfos + scope: Cluster + validation: + openAPIV3Schema: + properties: + hpeNodes: + description: List of HPE nodes configured for storage access. + items: + properties: + uuid: + description: The UUID of the node. 
+ type: string + iqns: + description: List of IQNs configured on the node. + items: + type: string + type: array + chapUser: + description: The CHAP User Name + type: string + chapPassword: + description: The CHAP Password + type: string + networks: + description: List of networks configured on the node. + items: + type: string + type: array + wwpns: + description: List of WWPNs configured on the node. + items: + type: string + type: array + type: array + version: v1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-replicated-device-info-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-replicated-device-info-crd.yaml new file mode 100644 index 000000000..846d76edd --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-replicated-device-info-crd.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpereplicationdeviceinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEReplicationDeviceInfo + plural: hpereplicationdeviceinfos + shortNames: + - hperdi + + scope: Cluster + validation: + openAPIV3Schema: + properties: + hpeReplicationDeviceInfos: + description: List of HPE Replicated Arrays configured for 3PAR/Primera arrays. + items: + properties: + targets: + description: Target Array Details + type: array + items: + properties: + targetName: + description: Target Name of the array + type: string + targetCpg: + description: Target CPG of the array + type: string + targetSnapCpg: + description: Target Snap CPG of the array + type: string + targetSecret: + description: Secret of the replicated array + type: string + targetMode: + description: Replication Mode + type: string + targetSecretNamespace: + description: Namespace of secret + type: string + required: + - targetName + - targetCpg + - targetSecret + - targetSecretNamespace + type: array + version: v1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-volumeinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-volumeinfo-crd.yaml new file mode 100644 index 000000000..417f4f4f3 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/crds/hpe-volumeinfo-crd.yaml @@ -0,0 +1,32 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpevolumeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEVolumeInfo + plural: hpevolumeinfos + scope: Cluster + validation: + openAPIV3Schema: + properties: + hpeVolumes: + description: List of HPE volumes configured for 3PAR/Primera arrays. + items: + properties: + uuid: + description: The UUID of the volume. + type: string + record: + description: Metadata for the volume. + type: map[string]string + type: array + version: v1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/files/config.json b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/files/config.json new file mode 100644 index 000000000..d00650184 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/files/config.json @@ -0,0 +1,128 @@ +[ + { + "category": "iscsi", + "severity": "warning", + "description": "Manual startup of iSCSI nodes on boot. 
Can be set in /etc/iscsi/iscsid.conf", + "parameter": "startup", + "recommendation": "manual" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Replacement_timeout of 10 seconds is recommended for faster failover of I/O by multipath on path failures. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "replacement_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum login timeout of 15 seconds is recommended with iSCSI. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "login_timeout", + "recommendation": "15" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum timeout of 10 seconds is recommended with noop requests. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "noop_out_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum cmds_max of 512 is recommended for each session if handling multiple LUN's. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "cmds_max", + "recommendation": "512" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum queue_depth of 256 is recommended for each iSCSI session/path. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "queue_depth", + "recommendation": "256" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum number of sessions per iSCSI login is recommended to be 1 by default. If additional sessions are needed this can be set in /etc/iscsi/iscsid.conf. If NCM is running, please change min_session_per_array in /etc/ncm.conf and restart nlt service instead", + "parameter": "nr_sessions", + "recommendation": "1" + }, + { + "category": "multipath", + "severity": "critical", + "description": "product attribute recommended to be set to Server in /etc/multipath.conf", + "parameter": "product", + "recommendation": "\"Server\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "alua prioritizer is recommended. Can be set in /etc/multipath.conf", + "parameter": "prio", + "recommendation": "alua" + }, + { + "category": "multipath", + "severity": "critical", + "description": "scsi_dh_alua device handler is recommended. Can be set in /etc/multipath.conf", + "parameter": "hardware_handler", + "recommendation": "\"1 alua\"" + }, + { + "category": "multipath", + "severity": "warning", + "description": "immediate failback setting is recommended. Can be set in /etc/multipath.conf", + "parameter": "failback", + "recommendation": "immediate" + }, + { + "category": "multipath", + "severity": "critical", + "description": "immediately fail i/o on transient path failures to retry on other paths, value=1. Can be set in /etc/multipath.conf", + "parameter": "fast_io_fail_tmo", + "recommendation": "5" + }, + { + "category": "multipath", + "severity": "critical", + "description": "queueing is recommended for 150 seconds, with no_path_retry value of 30. Can be set in /etc/multipath.conf", + "parameter": "no_path_retry", + "recommendation": "30" + }, + { + "category": "multipath", + "severity": "warning", + "description": "service-time path selector is recommended. 
Can be set in /etc/multipath.conf", + "parameter": "path_selector", + "recommendation": "\"service-time 0\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "vendor attribute recommended to be set to Nimble in /etc/multipath.conf", + "parameter": "vendor", + "recommendation": "\"Nimble\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "group paths according to ALUA path priority of active/standby. Recommended to be set to group_by_prio in /etc/multipath.conf", + "parameter": "path_grouping_policy", + "recommendation": "group_by_prio" + }, + { + "category": "multipath", + "severity": "critical", + "description": "tur path checker is recommended. Can be set in /etc/multipath.conf", + "parameter": "path_checker", + "recommendation": "tur" + }, + { + "category": "multipath", + "severity": "critical", + "description": "infinite value is recommended for timeout in cases of device loss for FC. Can be set in /etc/multipath.conf", + "parameter": "dev_loss_tmo", + "recommendation": "infinity" + } +] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/questions.yml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/questions.yml new file mode 100644 index 000000000..ac34ad76f --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/questions.yml @@ -0,0 +1,29 @@ +questions: +- variable: imagePullPolicy + label: "ImagePullPolicy" + default: "IfNotPresent" + type: enum + options: + - "IfNotPresent" + - "Always" + - "Never" + description: "ImagePullPolicy for all CSI driver images" + group: "HPE CSI Driver settings" +- variable: disableNodeConformance + label: "Disable automatic installation of iSCSI/Multipath Packages" + type: boolean + default: false + description: "Disable automatic installation of iSCSI/Multipath Packages" + group: "HPE CSI Driver settings" +- variable: iscsi.chapUser + label: "iSCSI CHAP Username" + type: string + required: true + description: "Specify username for iSCSI CHAP authentication" + group: "HPE iSCSI settings" +- variable: iscsi.chapPassword + label: "iSCSI CHAP Password" + type: password + required: true + description: "Specify password for iSCSI CHAP authentication" + group: "HPE iSCSI settings" diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/NOTES.txt b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/_helpers.tpl b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/_helpers.tpl new file mode 100644 index 000000000..165840d52 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hpe-csi-storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "hpe-csi-storage.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "hpe-csi-storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/csi-driver-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/csi-driver-crd.yaml new file mode 100644 index 000000000..39fbe4292 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/csi-driver-crd.yaml @@ -0,0 +1,16 @@ +{{- if semverCompare ">=1.14.0" .Capabilities.KubeVersion.GitVersion }} +--- + +################# CSI Driver ########### +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: csi.hpe.com +spec: + podInfoOnMount: true + {{- if semverCompare ">=1.16.0" .Capabilities.KubeVersion.GitVersion }} + volumeLifecycleModes: + - Persistent + - Ephemeral + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-controller.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-controller.yaml new file mode 100644 index 000000000..61705ec11 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-controller.yaml @@ -0,0 +1,171 @@ +--- + +############################################# +############ Controller driver ############ +############################################# + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: hpe-csi-controller + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: hpe-csi-controller + template: + metadata: + labels: + app: hpe-csi-controller + role: hpe-csi + spec: + serviceAccount: hpe-csi-controller-sa + {{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} + priorityClassName: system-cluster-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-provisioner + {{- if semverCompare "<=1.16.0" .Capabilities.KubeVersion.GitVersion }} + image: quay.io/k8scsi/csi-provisioner:v1.4.0 + {{- else }} + image: quay.io/k8scsi/csi-provisioner:v1.5.0 + {{- end }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + {{- if semverCompare ">= 1.13.0" .Capabilities.KubeVersion.GitVersion }} + - "--timeout=30s" + - "--worker-threads=16" + {{- end }} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: csi-attacher + {{- if semverCompare "~1.13.0" .Capabilities.KubeVersion.GitVersion }} + image: quay.io/k8scsi/csi-attacher:v1.1.0 + {{- else }} + image: quay.io/k8scsi/csi-attacher:v2.1.1 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: 
/var/lib/csi/sockets/pluginproxy + {{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} + - name: csi-snapshotter + image: quay.io/k8scsi/csi-snapshotter:v2.0.1 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: "Always" + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + {{- end }} + {{- if semverCompare ">=1.15.0" .Capabilities.KubeVersion.GitVersion }} + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v0.4.0 + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + {{- end }} + - name: hpe-csi-driver + image: hpestorage/csi-driver:v1.3.0 + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--flavor=kubernetes" + - "--pod-monitor" + - "--pod-monitor-interval=30" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: log-dir + mountPath: /var/log + - name: k8s + mountPath: /etc/kubernetes + - name: hpeconfig + mountPath: /etc/hpe-storage + - name: root-dir + mountPath: /host + - name: csi-volume-mutator + image: quay.io/hpestorage/volume-mutator:v1.0.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-extensions + image: quay.io/hpestorage/csi-extensions:v1.0.0 + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi-extensions.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: {} + - name: log-dir + hostPath: + path: /var/log + - name: k8s + hostPath: + path: /etc/kubernetes + - name: hpeconfig + hostPath: + path: /etc/hpe-storage + - name: root-dir + hostPath: + path: / + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-node.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-node.yaml new file mode 100644 index 000000000..8a3953638 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-node.yaml @@ -0,0 +1,165 @@ +--- + +####################################### +############ Node driver ############ +####################################### + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: hpe-csi-node + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: hpe-csi-node + template: + metadata: + labels: + app: hpe-csi-node + role: hpe-csi + spec: + serviceAccount: hpe-csi-node-sa + {{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} + priorityClassName: 
system-node-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + args: + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--v=5" + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/csi.hpe.com /registration/csi.hpe.com-reg.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.hpe.com/csi.sock + {{- if semverCompare "~1.12.0" .Capabilities.KubeVersion.GitVersion }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + {{- end }} + imagePullPolicy: "Always" + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration + - name: hpe-csi-driver + image: hpestorage/csi-driver:v1.3.0 + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--node-service" + - "--flavor=kubernetes" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + - name: CHAP_USER + value: {{ .Values.iscsi.chapUser }} + - name: CHAP_PASSWORD + value: {{ .Values.iscsi.chapPassword }} + {{- end }} + {{ if .Values.disableNodeConformance -}} + - name: DISABLE_NODE_CONFORMANCE + value: "true" + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. 
+ mountPropagation: "Bidirectional" + - name: root-dir + mountPath: /host + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + - name: log-dir + mountPath: /var/log + - name: etc-hpe-storage-dir + mountPath: /etc/hpe-storage + - name: etc-kubernetes + mountPath: /etc/kubernetes + - name: sys + mountPath: /sys + - name: runsystemd + mountPath: /run/systemd + - name: etcsystemd + mountPath: /etc/systemd/system + - name: linux-config-file + mountPath: /opt/hpe-storage/nimbletune/config.json + subPath: config.json + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.hpe.com + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + - name: root-dir + hostPath: + path: / + - name: device-dir + hostPath: + path: /dev + - name: log-dir + hostPath: + path: /var/log + - name: etc-hpe-storage-dir + hostPath: + path: /etc/hpe-storage + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: runsystemd + hostPath: + path: /run/systemd + - name: etcsystemd + hostPath: + path: /etc/systemd/system + - name: sys + hostPath: + path: /sys + - name: linux-config-file + configMap: + name: hpe-linux-config + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-rbac.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-rbac.yaml new file mode 100644 index 000000000..bbde8540e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-csi-rbac.yaml @@ -0,0 +1,408 @@ +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["services"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +{{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +{{- end }} + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", 
"list", "watch", "update", "patch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "create", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] + {{- if semverCompare "~1.12.0" .Capabilities.KubeVersion.GitVersion }} + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{- else if semverCompare "~1.13.0" .Capabilities.KubeVersion.GitVersion }} + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{ else }} + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + {{- end }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-attacher-role + apiGroup: rbac.authorization.k8s.io + + +{{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["create", "update", "delete", "get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "update", "delete", "get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: 
hpe-csi-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +{{- end }} + + +{{- if semverCompare ">=1.15.0" .Capabilities.KubeVersion.GitVersion }} +--- +# Resizer must be able to work with PVCs, PVs, SCs. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-resizer-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Resizer must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ .Release.Namespace }} + name: external-resizer-cfg +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role-cfg + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: external-resizer-cfg + apiGroup: rbac.authorization.k8s.io + + +--- +# mutator must be able to work with PVCs, PVs, SCs. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + # replace with non-default namespace name + namespace: kube-system +roleRef: + kind: ClusterRole + name: csi-mutator-role + apiGroup: rbac.authorization.k8s.io + +--- +# mutator must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: csi-mutator-cfg +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role-cfg + namespace: kube-system +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: kube-system +roleRef: + kind: Role + name: csi-mutator-cfg + apiGroup: rbac.authorization.k8s.io +{{- end }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-role + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["hpenodeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpevolumeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpereplicationdeviceinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csp-sa + namespace: {{ .Release.Namespace }} + +roleRef: + kind: ClusterRole + name: hpe-csi-driver-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csp-sa + namespace: {{ 
.Release.Namespace }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-linux-config.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-linux-config.yaml new file mode 100644 index 000000000..5e4c4944a --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-linux-config.yaml @@ -0,0 +1,13 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: hpe-linux-config + namespace: {{ .Release.Namespace }} +data: +{{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + CHAP_USER: {{ .Values.iscsi.chapUser | quote }} + CHAP_PASSWORD: {{ .Values.iscsi.chapPassword | quote }} +{{- end }} + config.json: |- +{{ (.Files.Get "files/config.json") | indent 4 }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-secret.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-secret.yaml new file mode 100644 index 000000000..f5b252758 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/hpe-secret.yaml @@ -0,0 +1,47 @@ +{{ if $secret := (lookup "v1" "Secret" .Release.Namespace "nimble-secret") -}} +# This is required to maintain backward compatibility of resources(secret) +# created with previous releases as we no longer create a secret with CSI driver install. +# Annotation "helm.sh/resource-policy": keep" should help to remove this file altogether +# in future. +# Also, manage the secret only if created by previous versions of helm with below check. +--- +apiVersion: v1 +kind: Secret +metadata: + name: nimble-secret + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} +data: + username: {{ $secret.data.username }} + password: {{ $secret.data.password }} + servicePort: {{ $secret.data.servicePort }} + backend: {{ $secret.data.backend }} + serviceName: {{ $secret.data.serviceName }} +{{- end }} + +{{ if $secret := (lookup "v1" "Secret" .Release.Namespace "primera3par-secret") -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: primera3par-secret + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} +data: + username: {{ $secret.data.username }} + password: {{ $secret.data.password }} + servicePort: {{ $secret.data.servicePort }} + backend: {{ $secret.data.backend }} + serviceName: {{ $secret.data.serviceName }} +{{- end }} + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/nimble-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/nimble-csp.yaml new file mode 100644 index 000000000..a2efbe5fb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/nimble-csp.yaml @@ -0,0 +1,60 @@ +--- +### CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: nimble-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: nimble-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: nimble-csp + +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nimble-csp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: nimble-csp + replicas: 1 + template: + metadata: + labels: + app: nimble-csp + spec: + serviceAccount: hpe-csp-sa + {{- if semverCompare ">=1.17.0" 
.Capabilities.KubeVersion.GitVersion }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: nimble-csp + image: hpestorage/nimble-csp:v1.3.0 + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/primera-3par-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/primera-3par-csp.yaml new file mode 100644 index 000000000..2e13db55e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/primera-3par-csp.yaml @@ -0,0 +1,61 @@ +--- +### CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: primera3par-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: primera3par-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: primera3par-csp + +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: primera3par-csp + labels: + app: primera3par-csp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: primera3par-csp + replicas: 1 + template: + metadata: + labels: + app: primera3par-csp + spec: + serviceAccount: hpe-csp-sa + {{- if semverCompare ">=1.17.0" .Capabilities.KubeVersion.GitVersion }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: primera3par-csp + image: hpestorage/hpe3parprimera-csp:v1.1.0 + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/sc.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/sc.yaml new file mode 100644 index 000000000..8d34426d8 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/templates/sc.yaml @@ -0,0 +1,38 @@ +{{ if $sc := (lookup "storage.k8s.io/v1" "StorageClass" "" "hpe-standard") -}} +# This is required to maintain backward compatibility of resources(storageclass) +# created with previous releases as we no longer create a storageclass with CSI driver install. +# Annotation "helm.sh/resource-policy": keep" should help to remove this file altogether +# in future. +# Also, manage the storageClass only if created by previous versions of helm with below check. 
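In other words, the template that follows only re-adopts an `hpe-standard` StorageClass left behind by an earlier chart release; it never creates a new one. As a rough, illustrative sketch (not a manifest shipped by this chart, and with placeholder parameters), an object it would keep managing could look like this:

```
# Hypothetical pre-existing StorageClass that the lookup-guarded template below would continue to manage.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hpe-standard
  annotations:
    helm.sh/resource-policy: keep      # keeps the object around when the release is uninstalled
provisioner: csi.hpe.com
allowVolumeExpansion: true
parameters:
  csi.storage.k8s.io/fstype: xfs       # placeholder; existing parameters are carried over verbatim
```

Whatever annotations and parameters the existing object carries are copied through unchanged by the `range` loops in the template.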
+--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: hpe-standard + labels: + plugin: {{ $.Release.Name }} + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} + {{- range $k, $v := $sc.metadata.annotations }} + {{- if or (eq $v "true") (eq $v "false")}} + {{ $k }}: {{ $v | quote }} + {{- else }} + {{ $k }}: {{ $v }} + {{- end }} + {{- end }} +provisioner: csi.hpe.com +{{- if semverCompare ">=1.15.0" .Capabilities.KubeVersion.GitVersion }} +allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} +{{- end }} +parameters: + {{- range $k, $v := $sc.parameters }} + {{- if or (eq $v "true") (eq $v "false")}} + {{ $k }}: {{ $v | quote }} + {{- else }} + {{ $k }}: {{ $v }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/values.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/values.yaml new file mode 100644 index 000000000..a05ed5b12 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.3.000/values.yaml @@ -0,0 +1,20 @@ +# Default values for hpe-csi-storage. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# image pull policy for all images in csi deployment +imagePullPolicy: 'IfNotPresent' + +# flavor +flavor: kubernetes + +# log level for all csi driver components +logLevel: info + +## For controlling automatic iscsi/multipath package installation (default: false) +disableNodeConformance: false + +# values for CHAP Authentication +iscsi: + chapUser: "" + chapPassword: "" \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/Chart.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/Chart.yaml new file mode 100644 index 000000000..796edcf93 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/Chart.yaml @@ -0,0 +1,19 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-csi-driver +apiVersion: v1 +appVersion: 1.4.0 +description: A Helm chart for installing the HPE CSI Driver for Kubernetes +home: https://hpe.com/storage/containers +icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png +keywords: +- HPE +- Storage +- StorageClass +maintainers: +- email: hpe-containers-dev@hpe.com + name: raunakkumar +name: hpe-csi-driver +sources: +- https://scod.hpedev.io/csi_driver +version: 1.4.200 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/README.md b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/README.md new file mode 100644 index 000000000..3ffbdd953 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/README.md @@ -0,0 +1,101 @@ +# HPE CSI Driver for Kubernetes Helm chart + +The [HPE CSI Driver for Kubernetes](https://scod.hpedev.io/csi_driver/index.html) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. + +## Prerequisites + +- Upstream Kubernetes version >= 1.15 +- Most Kubernetes distributions are supported +- Recent Ubuntu, SLES, CentOS or RHEL compute nodes connected to their respective official package repositories +- Helm 3 (Version >= 3.2.0 required) + +Depending on which [Container Storage Provider](https://scod.hpedev.io/container_storage_provider/index.html) (CSP) is being used, other prerequisites and requirements may apply, such as storage platform OS and features. 
+ +- [HPE Nimble Storage](https://scod.hpedev.io/container_storage_provider/hpe_nimble_storage/index.html) +- [HPE 3PAR and Primera](https://scod.hpedev.io/container_storage_provider/hpe_3par_primera/index.html) + +## Configuration and installation + +The following table lists the configurable parameters of the HPE-CSI chart and their default values. + +| Parameter | Description | Default | |---------------------------|------------------------------------------------------------------------|--------------| | logLevel | Log level. Can be one of `info`, `debug`, `trace`, `warn` and `error`. | info | | imagePullPolicy | Image pull policy (`Always`, `IfNotPresent`, `Never`). | IfNotPresent | | disableNodeConformance | Disable automatic installation of iSCSI/Multipath Packages. | false | | iscsi.chapUser | Username for iSCSI CHAP authentication. | "" | | iscsi.chapPassword | Password for iSCSI CHAP authentication. | "" | | registry | Registry to pull HPE CSI Driver container images from. | quay.io | + +It's recommended to download a sample [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver) file from the corresponding release of the chart and edit it to fit the environment the chart is being deployed to. + +These are the bare minimum required parameters for a successful deployment to an iSCSI environment if CHAP authentication is required. + +``` +iscsi: + chapUser: "" + chapPassword: "" +``` + +Tweak any additional parameters to suit the environment or as prescribed by HPE. + +### Installing the chart + +To install the chart with the name `my-hpe-csi-driver`: + +Add the HPE Helm repo: + +``` +helm repo add hpe-storage https://hpe-storage.github.io/co-deployments/ +helm repo update +``` + +Install the latest chart: + +``` +kubectl create ns hpe-storage +helm install my-hpe-csi-driver hpe-storage/hpe-csi-driver -n hpe-storage -f myvalues.yaml +``` + +**Note**: `values.yaml` is optional if no parameters are overridden from defaults. + +### Upgrading the chart + +Because of a [Helm limitation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) that prevents CRDs from being upgraded between chart versions, in-place upgrades of the `hpe-csi-driver` chart are not supported. +We recommend uninstalling the existing chart and installing the chart at the desired version. CRDs are preserved between uninstall and install. + +### Uninstalling the chart + +To uninstall the `my-hpe-csi-driver` chart: + +``` +helm uninstall my-hpe-csi-driver -n hpe-storage +``` + +**Note**: Due to a limitation in Helm, CRDs are not deleted as part of the chart uninstall. + +### Alternative install method + +In some cases it's more practical to provide the local configuration via the `helm` CLI directly. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. These will take precedence over entries in [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). For example: + +``` +helm install my-hpe-csi-driver hpe-storage/hpe-csi-driver -n hpe-storage \ + --set iscsi.chapUser=admin \ + --set iscsi.chapPassword=xxxxxxxx +``` + +## Using persistent storage with Kubernetes + +Enable dynamic provisioning of persistent storage by creating a `StorageClass` API object that references a `Secret` which maps to a supported HPE primary storage backend.
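As a rough sketch only (not part of this chart; names, addresses and credentials below are placeholders), the pair could look like this for an iSCSI backend. The Secret keys mirror the legacy `nimble-secret`/`primera3par-secret` layout handled earlier in this chart, and the `csi.storage.k8s.io/*` entries are the standard CSI sidecar secret references:

```
# Illustrative example only -- replace every value with details of the actual backend.
apiVersion: v1
kind: Secret
metadata:
  name: custom-secret
  namespace: hpe-storage
stringData:
  serviceName: nimble-csp-svc        # CSP service deployed by this chart (primera3par-csp-svc for 3PAR/Primera)
  servicePort: "8080"
  backend: 192.168.1.10              # array management address (placeholder)
  username: admin                    # placeholder credentials
  password: admin
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hpe-custom
provisioner: csi.hpe.com
allowVolumeExpansion: true
parameters:
  csi.storage.k8s.io/fstype: xfs
  csi.storage.k8s.io/provisioner-secret-name: custom-secret
  csi.storage.k8s.io/provisioner-secret-namespace: hpe-storage
  csi.storage.k8s.io/controller-publish-secret-name: custom-secret
  csi.storage.k8s.io/controller-publish-secret-namespace: hpe-storage
  csi.storage.k8s.io/node-stage-secret-name: custom-secret
  csi.storage.k8s.io/node-stage-secret-namespace: hpe-storage
  csi.storage.k8s.io/node-publish-secret-name: custom-secret
  csi.storage.k8s.io/node-publish-secret-namespace: hpe-storage
```

Platform-specific parameters (performance policies, CPGs, protocol selection and so on) differ per CSP and are intentionally omitted here.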
Refer to the [HPE CSI Driver for Kubernetes](https://scod.hpedev.io/csi_driver/using.html) documentation on [HPE Storage Container Orchestration Documentation](https://scod.hpedev.io/). Also, it's helpful to be familiar with [persistent storage concepts](https://kubernetes.io/docs/concepts/storage/volumes/) in Kubernetes prior to deploying stateful workloads. + +## Support + +The HPE CSI Driver for Kubernetes Helm chart is covered by your HPE support contract. Please file any issues, questions or feature requests [here](https://github.com/hpe-storage/co-deployments/issues) or contact HPE through the regular support channels. You may also join our Slack community to chat with HPE folks close to this project. We hang out in `#NimbleStorage`, `#3par-primera` and `#Kubernetes` at [hpedev.slack.com](https://hpedev.slack.com), sign up here: [slack.hpedev.io](https://slack.hpedev.io/). + +## Contributing + +We value all feedback and contributions. If you find any issues or want to contribute, please feel free to open an issue or file a PR. More details in [CONTRIBUTING.md](https://github.com/hpe-storage/co-deployments/blob/master/CONTRIBUTING.md) + +## License + +This is open source software licensed using the Apache License 2.0. Please see [LICENSE](https://github.com/hpe-storage/co-deployments/blob/master/LICENSE) for details. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/app-readme.md b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/app-readme.md new file mode 100644 index 000000000..29ca912cb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/app-readme.md @@ -0,0 +1,3 @@ +# HPE CSI Driver for Kubernetes + +The [HPE CSI Driver for Kubernetes](https://github.com/hpe-storage/csi-driver) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-nodeinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-nodeinfo-crd.yaml new file mode 100644 index 000000000..86c60cf34 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-nodeinfo-crd.yaml @@ -0,0 +1,70 @@ +--- +############################################# +############ HPE Node Info CRD ############ +############################################# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpenodeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPENodeInfo + plural: hpenodeinfos + scope: Cluster + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + schema: + openAPIV3Schema: + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object." + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents" + type: string + spec: + description: "spec defines the desired characteristics of a HPE nodeinfo requested by a user." + properties: + chapPassword: + description: "The CHAP Password" + type: string + chapUser: + description: "The CHAP User Name" + type: string + iqns: + description: "List of IQNs configured on the node." + items: + type: string + type: array + networks: + description: "List of networks configured on the node." + items: + type: string + type: array + uuid: + description: "The UUID of the node." 
+ type: string + wwpns: + description: "List of WWPNs configured on the node." + items: + type: string + type: array + required: + - uuid + - networks + type: object + required: + - spec + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-replicated-device-info-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-replicated-device-info-crd.yaml new file mode 100644 index 000000000..253d87d05 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-replicated-device-info-crd.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpereplicationdeviceinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEReplicationDeviceInfo + plural: hpereplicationdeviceinfos + shortNames: + - hperdi + + scope: Cluster + validation: + openAPIV3Schema: + properties: + hpeReplicationDeviceInfos: + description: List of HPE Replicated Arrays configured for 3PAR/Primera arrays. + items: + properties: + targets: + description: Target Array Details + type: array + items: + properties: + targetName: + description: Target Name of the array + type: string + targetCpg: + description: Target CPG of the array + type: string + targetSnapCpg: + description: Target Snap CPG of the array + type: string + targetSecret: + description: Secret of the replicated array + type: string + targetMode: + description: Replication Mode + type: string + targetSecretNamespace: + description: Namespace of secret + type: string + required: + - targetName + - targetCpg + - targetSecret + - targetSecretNamespace + type: array + version: v1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-snapshotgroupinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-snapshotgroupinfo-crd.yaml new file mode 100644 index 000000000..76789eda9 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-snapshotgroupinfo-crd.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpesnapshotgroupinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPESnapshotGroupInfo + plural: hpesnapshotgroupinfos + shortNames: + - hpesgi + scope: Cluster + version: v1 + validation: + openAPIV3Schema: + properties: + hpeSnapshotGroupInfos: + description: List of HPE snapshot groups created for 3PAR/Primera arrays. + type: array + items: + properties: + uuid: + description: The UUID of the node. 
+ type: string + + record: + description: Metadata for the volume group + type: map[string]string + + snapshotVolumes: + description: Snapshot volumes that are part of this snapshot group + type: array + items: + properties: + srcVolumeId: + description: ID of the volume that is the source of this snapshot volume + type: string + + srcVolumeName: + description: Name of the volume that is the source of this snapshot volume + type: string + + snapshotId: + description: Snapshot volume Id + type: string + + snapshotName: + description: Snapshot volume name + type: string + +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumegroupinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumegroupinfo-crd.yaml new file mode 100644 index 000000000..c9e58e415 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumegroupinfo-crd.yaml @@ -0,0 +1,64 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpevolumegroupinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEVolumeGroupInfo + plural: hpevolumegroupinfos + shortNames: + - hpevgi + scope: Cluster + version: v1 + validation: + openAPIV3Schema: + properties: + hpeVolumeGroupInfos: + description: List of HPE volume groups configured for 3PAR/Primera arrays. + type: array + items: + properties: + uuid: + description: The UUID of the node. + type: string + + record: + description: Metadata for the volume group + type: map[string]string + + snapshotGroups: + description: Snapshot groups that are linked to this volume group + type: array + items: + properties: + id: + description: ID of the snapshot group + type: string + + name: + description: Name of the snapshot group + type: string + + volumes: + description: Volumes that are members in this volume group + type: array + items: + properties: + volumeId: + description: ID of the member volume + type: string + + volumeName: + description: Name of the member volume + type: string + + + +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumeinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumeinfo-crd.yaml new file mode 100644 index 000000000..417f4f4f3 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/hpe-volumeinfo-crd.yaml @@ -0,0 +1,32 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpevolumeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEVolumeInfo + plural: hpevolumeinfos + scope: Cluster + validation: + openAPIV3Schema: + properties: + hpeVolumes: + description: List of HPE volumes configured for 3PAR/Primera arrays. + items: + properties: + uuid: + description: The UUID of the volume. + type: string + record: + description: Metadata for the volume. 
+ type: map[string]string + type: array + version: v1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupclasses.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupclasses.yaml new file mode 100644 index 000000000..b58878471 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupclasses.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroupclasses.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroupClass + listKind: SnapshotGroupClassList + plural: snapshotgroupclasses + singular: snapshotgroupclass + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroupClass specifies parameters that a underlying + storage system uses when creating a volumegroup snapshot. A specific SnapshotGroupClass + is used by specifying its name in a VolumeGroupSnapshot object. SnapshotGroupClasses + are non-namespaced + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + deletionPolicy: + description: deletionPolicy determines whether a SnapshotGroupContent + created through the SnapshotGroupClass should be deleted when its + bound SnapshotGroup is deleted. Supported values are "Retain" and + "Delete". "Retain" means that the SnapshotGroupContent and its physical + snapshotGroup on underlying storage system are kept. "Delete" means that + the SnapshotGroupContent and its physical snapshotGroup on underlying + storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + snapshotter: + description: snapshotter is the name of the storage driver that handles this + SnapshotGroupClass. Required. + type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshotGroups. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - snapshotter + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupcontents.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupcontents.yaml new file mode 100644 index 000000000..a7132c59e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroupcontents.yaml @@ -0,0 +1,104 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroupcontents.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroupContent + listKind: SnapshotGroupContentList + plural: snapshotgroupcontents + singular: snapshotgroupcontent + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroupContent represents the actual "on-disk" snapshotGroup + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a SnapshotGroupContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this SnapshotGroupContent + and its physical snapshotgroup on the underlying storage system should + be deleted when its bound SnapshotGroup is deleted. Supported + values are "Retain" and "Delete". "Retain" means that the SnapshotGroupContent + and its physical snapshotGroup on underlying storage system are kept. + "Delete" means that the SnapshotGroupContent and its physical + snapshotGroup on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + source: + description: source specifies from where a snapshotGroup will be created.Required. + properties: + snapshotGroupHandle: + description: snapshotGroupHandle specifies the snapshotGroup Id + of a pre-existing snapshotGroup on the underlying storage system. + This field is immutable. + type: string + type: object + snapshotGroupClassName: + description: name of the SnapshotGroupClass to which this snapshotGroup belongs. + type: string + snapshotGroupRef: + description: snapshotGroupRef specifies the SnapshotGroup object + to which this SnapshotGroupContent object is bound. SnapshotGroup.Spec.SnapshotGroupContentName + field must reference to this SnapshotGroupContent's name for + the bidirectional binding to be valid. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + volumeSnapshotContentNames: + description: list of volumeSnapshotContentNames associated with this snapshotGroups + type: array + items: + type: string + required: + - deletionPolicy + - source + - snapshotGroupClassName + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroups.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroups.yaml new file mode 100644 index 000000000..3372a7db7 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_snapshotgroups.yaml @@ -0,0 +1,83 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroups.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroup + listKind: SnapshotGroupList + plural: snapshotgroups + singular: snapshotgroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroup is a user's request for creating a snapshotgroup + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents' + type: string + spec: + description: spec defines the desired characteristics of a snapshotGroup + requested by a user. + Required. + properties: + source: + description: source specifies where a snapshotGroup will be created. + This field is immutable after creation. Required. + properties: + kind: + description: kind of the source (VolumeGroup) is the only supported one. + type: string + apiGroup: + description: apiGroup of the source. Current supported is storage.hpe.com + type: string + name: + description: name specifies the volumeGroupName of the VolumeGroup object in the same namespace as the SnapshotGroup object where the snapshotGroup should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the volumeSnapshotClass to create pre-provisioned snapshots + type: string + snapshotGroupClassName: + description: snapshotGroupClassName is the name of the SnapshotGroupClass requested by the SnapshotGroup. + type: string + snapshotGroupContentName: + description: snapshotGroupContentName is the name of the snapshotGroupContent the snapshotGroup is bound. + type: string + required: + - source + - volumeSnapshotClassName + - snapshotGroupClassName + type: object + status: + description: status represents the current information of a snapshotGroup. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshotGroup is taken by the underlying storage system. 
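Pulling the schemas in this section together, the user-facing request stays small. A hedged sketch follows (names are placeholders, `csi.hpe.com` is assumed as the snapshotter, and the referenced VolumeGroup and VolumeSnapshotClass must already exist):

```
# Illustrative only -- a SnapshotGroupClass plus a SnapshotGroup request built from the CRD schemas in this chart.
apiVersion: storage.hpe.com/v1
kind: SnapshotGroupClass
metadata:
  name: my-snapshotgroupclass          # placeholder name
snapshotter: csi.hpe.com               # storage driver assumed to handle this class
deletionPolicy: Delete
---
apiVersion: storage.hpe.com/v1
kind: SnapshotGroup
metadata:
  name: my-snapshotgroup               # placeholder name
spec:
  source:
    kind: VolumeGroup                  # the only source kind the schema describes
    apiGroup: storage.hpe.com
    name: my-volumegroup               # existing VolumeGroup in the same namespace
  volumeSnapshotClassName: my-volumesnapshotclass   # placeholder VolumeSnapshotClass
  snapshotGroupClassName: my-snapshotgroupclass
```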
+ format: date-time + type: string + phase: + description: the state of the snapshotgroup + enum: + - Pending + - Ready + - Failed + type: string + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupclasses.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupclasses.yaml new file mode 100644 index 000000000..e201ec94e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupclasses.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroupclasses.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroupClass + listKind: VolumeGroupClassList + plural: volumegroupclasses + singular: volumegroupclass + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroupClass specifies parameters that a underlying + storage system uses when creating a volumegroup. A specific VolumeGroupClass + is used by specifying its name in a VolumeGroup object. VolumeGroupClasses + are non-namespaced + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeGroupContent + created through the VolumeGroupClass should be deleted when its + bound VolumeGroup is deleted. Supported values are "Retain" and + "Delete". "Retain" means that the VolumeGroupContent and its physical + volumeGroup on underlying storage system are kept. "Delete" means that + the VolumeGroupContent and its physical volumeGroup on underlying + storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + provisioner: + description: provisioner is the name of the storage driver that handles this + VolumeGroupClass. Required. + type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating volumeGroups. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - provisioner + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupcontents.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupcontents.yaml new file mode 100644 index 000000000..d944909eb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroupcontents.yaml @@ -0,0 +1,96 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroupcontents.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroupContent + listKind: VolumeGroupContentList + plural: volumegroupcontents + singular: volumegroupcontent + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroupContent represents the actual "on-disk" volumeGroup + object in the underlying storage system + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. 
+ type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + spec: + description: spec defines properties of a VolumeGroupContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeGroupContent + and its physical volumegroup on the underlying storage system should + be deleted when its bound VolumeGroup is deleted. Supported + values are "Retain" and "Delete". "Retain" means that the VolumeGroupContent + and its physical volumeGroup on underlying storage system are kept. + "Delete" means that the VolumeGroupContent and its physical + volumeGroup on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + source: + description: source specifies from where a volumeGroup will be created.Required. + properties: + volumeGroupHandle: + description: volumeGroupHandle specifies the volumeGroup Id + of a pre-existing volumeGroup on the underlying storage system. + This field is immutable. + type: string + type: object + volumeGroupClassName: + description: name of the VolumeGroupClass to which this volumeGroup belongs. + type: string + volumeGroupRef: + description: volumeGroupRef specifies the VolumeGroup object + to which this VolumeGroupContent object is bound. VolumeGroup.Spec.VolumeGroupContentName + field must reference to this VolumeGroupContent's name for + the bidirectional binding to be valid. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - source + - volumeGroupClassName + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroups.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroups.yaml new file mode 100644 index 000000000..862b4398a --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/crds/storage.hpe.com_volumegroups.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroups.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroup + listKind: VolumeGroupList + plural: volumegroups + singular: volumegroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroup is a user's request for creating a volumegroup + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents' + type: string + spec: + description: spec defines the desired characteristics of a volumeGroup + requested by a user. + Required. + properties: + volumeGroupClassName: + description: name of the volumeGroupClassName to create volumeGroups + type: string + persistentVolumeClaimNames: + description: persistentVolumeClaimNames are the name of the PVC associated with this volumeGroup. + type: array + items: + type: string + volumeGroupContentName: + description: volumeGroupContentName is the name of the volumeGroupContent to which the volumeGroup is bound. + type: string + required: + - volumeGroupClassName + type: object + status: + description: status represents the current information of a volumeGroup. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + volumeGroup is taken by the underlying storage system. + format: date-time + type: string + phase: + description: the state of the volumegroup + enum: + - Pending + - Ready + - Failed + type: string + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/files/config.json b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/files/config.json new file mode 100644 index 000000000..d00650184 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/files/config.json @@ -0,0 +1,128 @@ +[ + { + "category": "iscsi", + "severity": "warning", + "description": "Manual startup of iSCSI nodes on boot. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "startup", + "recommendation": "manual" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Replacement_timeout of 10 seconds is recommended for faster failover of I/O by multipath on path failures. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "replacement_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum login timeout of 15 seconds is recommended with iSCSI. 
Can be set in /etc/iscsi/iscsid.conf", + "parameter": "login_timeout", + "recommendation": "15" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum timeout of 10 seconds is recommended with noop requests. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "noop_out_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum cmds_max of 512 is recommended for each session if handling multiple LUN's. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "cmds_max", + "recommendation": "512" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum queue_depth of 256 is recommended for each iSCSI session/path. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "queue_depth", + "recommendation": "256" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum number of sessions per iSCSI login is recommended to be 1 by default. If additional sessions are needed this can be set in /etc/iscsi/iscsid.conf. If NCM is running, please change min_session_per_array in /etc/ncm.conf and restart nlt service instead", + "parameter": "nr_sessions", + "recommendation": "1" + }, + { + "category": "multipath", + "severity": "critical", + "description": "product attribute recommended to be set to Server in /etc/multipath.conf", + "parameter": "product", + "recommendation": "\"Server\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "alua prioritizer is recommended. Can be set in /etc/multipath.conf", + "parameter": "prio", + "recommendation": "alua" + }, + { + "category": "multipath", + "severity": "critical", + "description": "scsi_dh_alua device handler is recommended. Can be set in /etc/multipath.conf", + "parameter": "hardware_handler", + "recommendation": "\"1 alua\"" + }, + { + "category": "multipath", + "severity": "warning", + "description": "immediate failback setting is recommended. Can be set in /etc/multipath.conf", + "parameter": "failback", + "recommendation": "immediate" + }, + { + "category": "multipath", + "severity": "critical", + "description": "immediately fail i/o on transient path failures to retry on other paths, value=1. Can be set in /etc/multipath.conf", + "parameter": "fast_io_fail_tmo", + "recommendation": "5" + }, + { + "category": "multipath", + "severity": "critical", + "description": "queueing is recommended for 150 seconds, with no_path_retry value of 30. Can be set in /etc/multipath.conf", + "parameter": "no_path_retry", + "recommendation": "30" + }, + { + "category": "multipath", + "severity": "warning", + "description": "service-time path selector is recommended. Can be set in /etc/multipath.conf", + "parameter": "path_selector", + "recommendation": "\"service-time 0\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "vendor attribute recommended to be set to Nimble in /etc/multipath.conf", + "parameter": "vendor", + "recommendation": "\"Nimble\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "group paths according to ALUA path priority of active/standby. Recommended to be set to group_by_prio in /etc/multipath.conf", + "parameter": "path_grouping_policy", + "recommendation": "group_by_prio" + }, + { + "category": "multipath", + "severity": "critical", + "description": "tur path checker is recommended. 
Can be set in /etc/multipath.conf", + "parameter": "path_checker", + "recommendation": "tur" + }, + { + "category": "multipath", + "severity": "critical", + "description": "infinite value is recommended for timeout in cases of device loss for FC. Can be set in /etc/multipath.conf", + "parameter": "dev_loss_tmo", + "recommendation": "infinity" + } +] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/questions.yml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/questions.yml new file mode 100644 index 000000000..dee65bb8e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/questions.yml @@ -0,0 +1,37 @@ +labels: + io.rancher.certified: partner +questions: +- variable: imagePullPolicy + label: "ImagePullPolicy" + default: "IfNotPresent" + type: enum + options: + - "IfNotPresent" + - "Always" + - "Never" + description: "ImagePullPolicy for all CSI driver images" + group: "HPE CSI Driver settings" +- variable: disableNodeConformance + label: "Disable automatic installation of iSCSI/Multipath Packages" + type: boolean + default: false + description: "Disable automatic installation of iSCSI/Multipath Packages" + group: "HPE CSI Driver settings" +- variable: iscsi.chapUser + label: "iSCSI CHAP Username" + type: string + required: false + description: "Specify username for iSCSI CHAP authentication" + group: "HPE iSCSI settings" +- variable: iscsi.chapPassword + label: "iSCSI CHAP Password" + type: password + required: false + description: "Specify password for iSCSI CHAP authentication" + group: "HPE iSCSI settings" +- variable: registry + label: "Registry" + type: string + default: "quay.io" + description: "Specify registry prefix (hostname[:port]) for CSI driver images" + group: "HPE CSI Driver settings" diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/NOTES.txt b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/_helpers.tpl b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/_helpers.tpl new file mode 100644 index 000000000..165840d52 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hpe-csi-storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "hpe-csi-storage.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "hpe-csi-storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/csi-driver-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/csi-driver-crd.yaml new file mode 100644 index 000000000..61275fffa --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/csi-driver-crd.yaml @@ -0,0 +1,24 @@ + + + +--- + +################# CSI Driver ########### +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "18") }} +apiVersion: storage.k8s.io/v1 +{{- else if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "14") }} +apiVersion: storage.k8s.io/v1beta1 +{{- end }} + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "14") }} +kind: CSIDriver +metadata: + name: csi.hpe.com +spec: + podInfoOnMount: true + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "16") }} + volumeLifecycleModes: + - Persistent + - Ephemeral + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-controller.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-controller.yaml new file mode 100644 index 000000000..c1a3e3e39 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-controller.yaml @@ -0,0 +1,223 @@ +--- + +############################################# +############ Controller driver ############ +############################################# + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: hpe-csi-controller + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: hpe-csi-controller + template: + metadata: + labels: + app: hpe-csi-controller + role: hpe-csi + spec: + serviceAccount: hpe-csi-controller-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-provisioner + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-provisioner:v1.5.0 + {{- else }} + image: quay.io/k8scsi/csi-provisioner:v1.5.0 + {{- end }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "13") }} + - "--timeout=30s" + - "--worker-threads=16" + {{- end }} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: csi-attacher + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-attacher:v2.1.1 + {{- else }} + image: quay.io/k8scsi/csi-attacher:v2.1.1 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" 
.Capabilities.KubeVersion.Minor ) "17") }} + - name: csi-snapshotter + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-snapshotter:v3.0.2 + {{- else }} + image: quay.io/k8scsi/csi-snapshotter:v3.0.2 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + {{- end }} + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "15") }} + - name: csi-resizer + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-resizer:v0.4.0 + {{- else }} + image: quay.io/k8scsi/csi-resizer:v0.4.0 + {{- end }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + {{- end }} + - name: hpe-csi-driver + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-driver:v1.4.0 + {{- else }} + image: quay.io/hpestorage/csi-driver:v1.4.0 + {{- end }} + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--flavor=kubernetes" + - "--pod-monitor" + - "--pod-monitor-interval=30" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: log-dir + mountPath: /var/log + - name: k8s + mountPath: /etc/kubernetes + - name: hpeconfig + mountPath: /etc/hpe-storage + - name: root-dir + mountPath: /host + - name: csi-volume-mutator + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-mutator:v1.2.0 + {{- else }} + image: quay.io/hpestorage/volume-mutator:v1.2.0 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-volume-group-snapshotter + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-group-snapshotter:v1.0.0 + {{- else }} + image: quay.io/hpestorage/volume-group-snapshotter:v1.0.0 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-volume-group-provisioner + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-group-provisioner:v1.0.0 + {{- else }} + image: quay.io/hpestorage/volume-group-provisioner:v1.0.0 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-extensions + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-extensions:v1.2.0 + {{- else }} + image: quay.io/hpestorage/csi-extensions:v1.2.0 + {{- end }} + args: + 
- "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi-extensions.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: {} + - name: log-dir + hostPath: + path: /var/log + - name: k8s + hostPath: + path: /etc/kubernetes + - name: hpeconfig + hostPath: + path: /etc/hpe-storage + - name: root-dir + hostPath: + path: / + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-node.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-node.yaml new file mode 100644 index 000000000..d6657199b --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-node.yaml @@ -0,0 +1,171 @@ +--- + +####################################### +############ Node driver ############ +####################################### + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: hpe-csi-node + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: hpe-csi-node + template: + metadata: + labels: + app: hpe-csi-node + role: hpe-csi + spec: + serviceAccount: hpe-csi-node-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-node-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-node-driver-registrar + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-node-driver-registrar:v2.0.1 + {{- else }} + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + {{- end}} + args: + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--v=5" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.hpe.com/csi.sock + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "12") }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + {{- end }} + imagePullPolicy: "Always" + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: hpe-csi-driver + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-driver:v1.4.0 + {{- else }} + image: quay.io/hpestorage/csi-driver:v1.4.0 + {{- end}} + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--node-service" + - "--flavor=kubernetes" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + - name: CHAP_USER + value: {{ .Values.iscsi.chapUser }} + - name: CHAP_PASSWORD + value: {{ .Values.iscsi.chapPassword }} + {{- end }} + {{ if .Values.disableNodeConformance -}} + - name: DISABLE_NODE_CONFORMANCE + value: "true" + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + 
allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - name: root-dir + mountPath: /host + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + - name: log-dir + mountPath: /var/log + - name: etc-hpe-storage-dir + mountPath: /etc/hpe-storage + - name: etc-kubernetes + mountPath: /etc/kubernetes + - name: sys + mountPath: /sys + - name: runsystemd + mountPath: /run/systemd + - name: etcsystemd + mountPath: /etc/systemd/system + - name: linux-config-file + mountPath: /opt/hpe-storage/nimbletune/config.json + subPath: config.json + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.hpe.com + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + - name: root-dir + hostPath: + path: / + - name: device-dir + hostPath: + path: /dev + - name: log-dir + hostPath: + path: /var/log + - name: etc-hpe-storage-dir + hostPath: + path: /etc/hpe-storage + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: runsystemd + hostPath: + path: /run/systemd + - name: etcsystemd + hostPath: + path: /etc/systemd/system + - name: sys + hostPath: + path: /sys + - name: linux-config-file + configMap: + name: hpe-linux-config + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-rbac.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-rbac.yaml new file mode 100644 index 000000000..1da7d7c2e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-csi-rbac.yaml @@ -0,0 +1,563 @@ +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["services"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + - apiGroups: ["snapshot.storage.k8s.io"] + resources: 
["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +{{- end }} + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "create", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "12") }} + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{- else if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "13") }} + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{ else }} + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + {{- end }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-attacher-role + apiGroup: rbac.authorization.k8s.io + + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["create", "update", "delete", "get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "update", "delete", "get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + 
verbs: ["get", "list", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +{{- end }} + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "15") }} +--- +# Resizer must be able to work with PVCs, PVs, SCs. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-resizer-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Resizer must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ .Release.Namespace }} + name: external-resizer-cfg +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role-cfg + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: external-resizer-cfg + apiGroup: rbac.authorization.k8s.io + + +--- +# cluster role to support volumegroup +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-volumegroup-role +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupclasses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: 
["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-volumegroup-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-volumegroup-role + apiGroup: rbac.authorization.k8s.io + +--- +# cluster role to support snapshotgroup +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotgroup-role +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroups"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupcontents"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupclasses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroups/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupcontents/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - 
apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotgroup-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-snapshotgroup-role + apiGroup: rbac.authorization.k8s.io + +--- +# mutator must be able to work with PVCs, PVs, SCs. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + # replace with non-default namespace name + namespace: {{ .Release.Namespace }} + +roleRef: + kind: ClusterRole + name: csi-mutator-role + apiGroup: rbac.authorization.k8s.io + +--- +# mutator must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ .Release.Namespace }} + name: csi-mutator-cfg +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role-cfg + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + +roleRef: + kind: Role + name: csi-mutator-cfg + apiGroup: rbac.authorization.k8s.io +{{- end }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-role + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["hpenodeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpevolumeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpereplicationdeviceinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpevolumegroupinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpesnapshotgroupinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: 
["namespaces"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csp-sa + namespace: {{ .Release.Namespace }} + +roleRef: + kind: ClusterRole + name: hpe-csi-driver-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csp-sa + namespace: {{ .Release.Namespace }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-linux-config.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-linux-config.yaml new file mode 100644 index 000000000..5e4c4944a --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-linux-config.yaml @@ -0,0 +1,13 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: hpe-linux-config + namespace: {{ .Release.Namespace }} +data: +{{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + CHAP_USER: {{ .Values.iscsi.chapUser | quote }} + CHAP_PASSWORD: {{ .Values.iscsi.chapPassword | quote }} +{{- end }} + config.json: |- +{{ (.Files.Get "files/config.json") | indent 4 }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-secret.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-secret.yaml new file mode 100644 index 000000000..f5b252758 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/hpe-secret.yaml @@ -0,0 +1,47 @@ +{{ if $secret := (lookup "v1" "Secret" .Release.Namespace "nimble-secret") -}} +# This is required to maintain backward compatibility of resources(secret) +# created with previous releases as we no longer create a secret with CSI driver install. +# Annotation "helm.sh/resource-policy": keep" should help to remove this file altogether +# in future. +# Also, manage the secret only if created by previous versions of helm with below check. 
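+# Note: Helm's `lookup` function returns an empty result when templates are rendered
+# offline (e.g. with `helm template` or `--dry-run`), so this guarded block only renders
+# against a live cluster where a previously created nimble-secret actually exists.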
+--- +apiVersion: v1 +kind: Secret +metadata: + name: nimble-secret + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} +data: + username: {{ $secret.data.username }} + password: {{ $secret.data.password }} + servicePort: {{ $secret.data.servicePort }} + backend: {{ $secret.data.backend }} + serviceName: {{ $secret.data.serviceName }} +{{- end }} + +{{ if $secret := (lookup "v1" "Secret" .Release.Namespace "primera3par-secret") -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: primera3par-secret + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} +data: + username: {{ $secret.data.username }} + password: {{ $secret.data.password }} + servicePort: {{ $secret.data.servicePort }} + backend: {{ $secret.data.backend }} + serviceName: {{ $secret.data.serviceName }} +{{- end }} + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/nimble-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/nimble-csp.yaml new file mode 100644 index 000000000..332b95d5b --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/nimble-csp.yaml @@ -0,0 +1,64 @@ +--- +### CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: nimble-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: nimble-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: nimble-csp + +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nimble-csp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: nimble-csp + replicas: 1 + template: + metadata: + labels: + app: nimble-csp + spec: + serviceAccount: hpe-csp-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: nimble-csp + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/nimble-csp:v1.4.0 + {{- else }} + image: quay.io/hpestorage/nimble-csp:v1.4.0 + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/primera-3par-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/primera-3par-csp.yaml new file mode 100644 index 000000000..5b1c65e2d --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/primera-3par-csp.yaml @@ -0,0 +1,65 @@ +--- +### CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: primera3par-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: primera3par-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: primera3par-csp + +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: primera3par-csp + labels: + app: primera3par-csp + namespace: {{ 
.Release.Namespace }} +spec: + selector: + matchLabels: + app: primera3par-csp + replicas: 1 + template: + metadata: + labels: + app: primera3par-csp + spec: + serviceAccount: hpe-csp-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: primera3par-csp + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/hpe3parprimera-csp:v1.2.0 + {{- else }} + image: quay.io/hpestorage/hpe3parprimera-csp:v1.2.0 + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/sc.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/sc.yaml new file mode 100644 index 000000000..a864865bf --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/templates/sc.yaml @@ -0,0 +1,38 @@ +{{ if $sc := (lookup "storage.k8s.io/v1" "StorageClass" "" "hpe-standard") -}} +# This is required to maintain backward compatibility of resources(storageclass) +# created with previous releases as we no longer create a storageclass with CSI driver install. +# Annotation "helm.sh/resource-policy": keep" should help to remove this file altogether +# in future. +# Also, manage the storageClass only if created by previous versions of helm with below check. +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: hpe-standard + labels: + plugin: {{ $.Release.Name }} + app.kubernetes.io/managed-by: Helm + annotations: + helm.sh/resource-policy: keep + meta.helm.sh/release-name: {{ .Release.Name }} + meta.helm.sh/release-namespace: {{ .Release.Namespace }} + {{- range $k, $v := $sc.metadata.annotations }} + {{- if or (eq $v "true") (eq $v "false")}} + {{ $k }}: {{ $v | quote }} + {{- else }} + {{ $k }}: {{ $v }} + {{- end }} + {{- end }} +provisioner: csi.hpe.com +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "15") }} +allowVolumeExpansion: {{ $sc.allowVolumeExpansion }} +{{- end }} +parameters: + {{- range $k, $v := $sc.parameters }} + {{- if or (eq $v "true") (eq $v "false")}} + {{ $k }}: {{ $v | quote }} + {{- else }} + {{ $k }}: {{ $v }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/values.yaml b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/values.yaml new file mode 100644 index 000000000..edf47eaee --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/1.4.200/values.yaml @@ -0,0 +1,24 @@ +# Default values for hpe-csi-storage. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
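+# Illustrative note (assumption, not part of the upstream chart): any value in this file
+# can be overridden at install time, for example (Helm 3, hypothetical release name and chart path):
+#   helm install my-hpe-csi ./hpe-csi-driver --namespace kube-system --set logLevel=debug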
+ +# image pull policy for all images in csi deployment +imagePullPolicy: 'IfNotPresent' + +# flavor +flavor: kubernetes + +# log level for all csi driver components +logLevel: info + +## For controlling automatic iscsi/multipath package installation (default: false) +disableNodeConformance: false + +# values for CHAP Authentication +iscsi: + chapUser: "" + chapPassword: "" + +# registry prefix for hpe csi images +registry: "quay.io" + diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/Chart.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/Chart.yaml new file mode 100644 index 000000000..3a61ef6be --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-flexvolume-driver +apiVersion: v1 +appVersion: "3.1" +description: A Helm chart for installing the HPE Volume Driver for Kubernetes FlexVolume + plugin +home: https://hpe.com/storage/containers +icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png +keywords: +- HPE +- Storage +- StorageClass +- CentOS +- Ubuntu +- CloudVolumes +maintainers: +- email: hpe-containers-dev@hpe.com + name: shivamerla +name: hpe-flexvolume-driver +sources: +- https://github.com/hpe-storage/flexvolume-driver +version: 3.1.000 diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/README.md b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/README.md new file mode 100644 index 000000000..6cf393897 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/README.md @@ -0,0 +1,173 @@ +# HPE Volume Driver for Kubernetes FlexVolume Plugin Helm chart +The [HPE Volume Driver for Kubernetes FlexVolume Plugin](https://github.com/hpe-storage/flexvolume-driver) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. This chart also deploys the [HPE Dynamic Provisioner for Kubernetes](https://github.com/hpe-storage/k8s-dynamic-provisioner). + +## Prerequisites + +- Upstream Kubernetes version 1.11 or later +- Other Kubernetes distributions supported +- Rancher 2.x +- OpenShift 3.10, 3.11 (4.x will not be supported, see [CSI Driver Helm chart](https://github.com/hpe-storage/co-deployments/tree/master/helm/charts/hpe-csi-driver)) +- More distributions will be listed as tests are ongoing +- Recent Ubuntu, CentOS or RHEL compute nodes connected to their respective official package repositories + +Depending on which `pluginType` is being used, other prerequisites and requirements may apply. + +### HPE Nimble Storage (nimble) + +- NimbleOS 5.0.8 or later +- NimbleOS 5.1.3 or later + +### HPE Cloud Volumes + +- Amazon EKS 1.12.x/1.13.x +- Microsoft AKS 1.12.x/1.13.x +- US Regions Only + +## Configuration & Installation +The following table lists the configurable parameters of the FlexVolume driver chart and their default values. + +| Parameter | Description | Default | +|---------------------------|----------------------------------------------------------------------------------------------------|------------ | +| backend | HPE storage platform API endpoint. | 192.168.1.1 | +| pluginType | Backend plugin type to use. Currently `nimble` and `cv` are supported. | nimble | +| username | Username for the backend. Access key for HPE Cloud Volumes. | admin | +| password | Password for the backend. Access secret for HPE Cloud Volumes. 
| admin | + | protocol | Data plane protocol (`fc`, `iscsi`). | iscsi | + | fsType | Filesystem type to format volumes with (ext4, ext3, xfs, btrfs). | xfs | + | logLevel | Log level. Can be one of `info`, `debug`, `trace`, `warn` and `error`. | info | + | mountConflictDelay | Wait this long (in seconds) before forcefully taking over a volume from an isolated or crashed node. | 120 | + | flavor | Kubernetes distribution-specific tweaks. Supported flavors include `k8s`, `ocp`, `eks`, `aks` and `rke`. | k8s | + | podsMountDir | This is the directory where the kubelet bind mounts the volume for pods. May differ between Kubernetes distributions. | /var/lib/kubelet/pods | + | storageClass.name | The name to assign the created StorageClass. | hpe-standard | + | storageClass.create | Enables creation of StorageClass to consume this hpe-flexvolume-driver instance. | true | + | storageClass.defaultClass | Whether to set the created StorageClass as the cluster's default StorageClass. | false | + | nimble.config | HPE Nimble Storage volume config parameters. | - | + | cv.config | HPE Cloud Volumes volume config parameters. | - | + +It's recommended to create a `values.yaml` file and edit it to fit the environment the chart is being deployed to. + +Example `values.yaml` using a Nimble backend: + +``` +--- +backend: 192.168.1.1 +username: admin +password: admin +pluginType: nimble +fsType: xfs +storageClass: + defaultClass: true +``` + +This will connect the driver to a Nimble-based backend with management IP address of `192.168.1.1` and format new volumes with an XFS filesystem. + +The `nimble.config` or `cv.config` stanza will be hosted in a `ConfigMap` and can be used to tweak default parameters and also override `StorageClass` parameters. More information on these stanzas can be found in the [ADVANCED.md](https://github.com/hpe-storage/flexvolume-driver/blob/master/ADVANCED.md) documentation. + +Example `nimble.config` stanza: + +``` +nimble: + config: + limitIOPS: "-1" + limitMBPS: "-1" + perfPolicy: DockerDefault +``` + +Example `cv.config` stanza: + +``` +cv: + config: + snapPrefix: BaseFor + automatedConnection: true + existingCloudSubnet: 10.1.0.0/24 + region: us-east-1 + privateCloud: vpc-data + cloudComputeProvider: "Amazon AWS" + perfPolicy: Other + volumeType: PF + encryption: true + protectionTemplate: twicedaily:4 + destroyOnRm: true + limitIOPS: "1000" + initiators: + - '"eth0"' + privateCloudResourceGroup: "" +``` + +**Note:** Storage class parameters will override the settings in the `defaults` and `global` sections. + +### Platform notes +Some distributions require specific tweaks to the variables for the driver and dynamic provisioner to operate correctly. See each platform for details. + +#### Upstream Kubernetes +This is the default operating mode; no tweaks are needed. + +#### Red Hat OpenShift and OKD +Applicable to Red Hat OpenShift 3.10 and 3.11. 4.x is not supported*. + +| Key | Value | Description | +|------------|---------------------------|------------------------------------------------------------------------------------| +| podsMountDir | /var/lib/origin/openshift.local.volumes | This is the directory where the kubelet bind mounts the volume for pods. | + +* = If experimentation is desirable with OpenShift 4.x, set the `flexVolumeExec` default path for ocp to `/etc/kubernetes/kubelet-plugins/volume/exec`. The driver will only work on RHEL 7.x nodes. + +#### Rancher +Applicable to installing the Helm Chart via the Rancher catalog system.
+ | Key | Value | Description | +|------------|---------------------------|------------------------------------------------------------------------------------| +| flavor | rke | Required and prepopulated by default. | +| podsMountDir | /var/lib/kubelet/volumeplugins | This is the directory where the kubelet bind mounts the volume for pods. Required and prepopulated by default.| + +## Installing the Chart +To install the chart with the name `hpe-flexvolume`: + +``` +helm repo add hpe-storage https://hpe-storage.github.io/co-deployments/ +helm install hpe-storage/hpe-flexvolume-driver --namespace kube-system --name hpe-flexvolume -f values.yaml +``` + +**Note:** Omitting the `--name` flag will generate a human-readable name. + +## Check status of the Chart +To check status of the `hpe-flexvolume` deployment: + +``` +helm status hpe-flexvolume +``` + +## Uninstalling the Chart +To uninstall/delete the `hpe-flexvolume` deployment: + +``` +helm delete hpe-flexvolume --purge +``` + +## Alternative install method +In some cases it's more practical to provide the local configuration via the `helm` command directly. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +``` +helm install --name hpe-flexvolume hpe/hpe-flexvolume-driver \ +--set backend=X.X.X.X --set username=admin --set password=xxxxxxxxx \ +--set protocol=iscsi --set fsType=xfs +``` + +## Using the HPE Volume Driver for Kubernetes FlexVolume Plugin +To enable dynamic provisioning of `PersistentVolume` through the use of `PersistentVolumeClaim` API objects, a `StorageClass` needs to be declared on the cluster. Please see the [HPE Volume Driver for Kubernetes FlexVolume Plugin](https://github.com/hpe-storage/flexvolume-driver) repository for the official documentation for this Helm chart. Also, it's helpful to be familiar with [persistent storage concepts](https://kubernetes.io/docs/concepts/storage/volumes/) in Kubernetes prior to deploying stateful workloads. + +## Support +The HPE Volume Driver for Kubernetes FlexVolume Plugin Helm chart is supported by the respective platform team. Currently supported platforms: + +- HPE Nimble Storage +- HPE Cloud Volumes + +Please file issues through the regular support channels for the particular platform. Feature requests or general questions to developers may be filed through the [GitHub issue tracker](https://github.com/hpe-storage/co-deployments) for this project. + +You may also join our Slack community to chat with HPE folks close to this project for inquiries not requiring our immediate response. We hang out in `#NimbleStorage` and `#Kubernetes` at [slack.hpedev.io](https://slack.hpedev.io/). + +## Contributing +We value all feedback and contributions. If you find any issues or want to contribute, please feel free to open an issue or file a PR. More details are in [CONTRIBUTING.md](https://github.com/hpe-storage/co-deployments/blob/master/CONTRIBUTING.md). + +## License +This is open source software licensed under the Apache License 2.0. Please see [LICENSE](https://github.com/hpe-storage/co-deployments/blob/master/LICENSE) for details. 
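As a companion to the "Using the HPE Volume Driver for Kubernetes FlexVolume Plugin" section of the README above, the following is a minimal sketch (not part of the chart) of a `PersistentVolumeClaim` that requests dynamically provisioned storage from the chart's default `hpe-standard` StorageClass; the claim name and requested size are hypothetical:

```
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-data-claim              # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                # hypothetical size
  storageClassName: hpe-standard   # StorageClass created by the chart when storageClass.create is true
```

Once the claim is bound, it can be referenced from a pod's `volumes` section like any other PVC; the HPE Dynamic Provisioner creates the backing volume on the configured backend.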
diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/app-readme.md b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/app-readme.md new file mode 100644 index 000000000..4d8e79cf5 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/app-readme.md @@ -0,0 +1,3 @@ +# HPE Volume Driver for Kubernetes FlexVolume Plugin + +The [HPE Volume Driver for Kubernetes FlexVolume plugin](https://github.com/hpe-storage/flexvolume-driver) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. This chart also deploys the [HPE Dynamic Provisioner for Kubernetes](https://github.com/hpe-storage/k8s-dynamic-provisioner). diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/questions.yml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/questions.yml new file mode 100644 index 000000000..034718811 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/questions.yml @@ -0,0 +1,172 @@ +questions: +- variable: flavor + label: "Kubernetes flavor" + type: enum + default: "rke" + required: true + options: + - "rke" + - "eks" + - "ocp" + - "aks" + - "gke" + - "gkeop" + - "k8s" + description: "Tweak Helm chart behavior." + group: "Rancher specific settings" +- variable: pluginType + label: "HPE platform" + type: enum + options: + - "nimble" + - "cv" + - "simplivity" + default: "nimble" + description: "HPE platform type for the deployment." + group: "HPE backend settings" +- variable: backend + label: "IP address" + type: string + required: true + description: "Please specify HPE backend IP address." + group: "HPE backend settings" +- variable: username + label: "Username" + type: string + required: true + description: "Specify username with backend storage admin permissions." + group: "HPE backend settings" +- variable: password + label: "Password" + type: password + required: true + description: "Specify password for the backend user." + group: "HPE backend settings" +- variable: fsType + label: "Filesystem" + default: "xfs" + type: enum + options: + - "xfs" + - "ext4" + - "ext3" + - "btrfs" + description: "Select the filesystem for Persistent Volumes, defaults to xfs." + group: "HPE StorageClass and volume settings" +- variable: protocol + label: "HPE storage protocol" + type: enum + default: "iscsi" + options: + - "iscsi" + - "fc" + description: "Specify storage protocol for HPE backend connectivity." + group: "HPE StorageClass and volume settings" +- variable: storageClass.create + label: "Create a StorageClass" + type: boolean + default: true + required: true + description: "If specified as 'true', a StorageClass named 'hpe-standard' will be created with the HPE Volume Driver for Kubernetes FlexVolume Plugin as provisioner." + group: "HPE StorageClass and volume settings" +- variable: storageClass.defaultClass + label: "Mark StorageClass 'hpe-standard' as 'default'." + type: boolean + default: false + description: "If specified as 'true', the 'hpe-standard' StorageClass will be annotated as 'default'. This option is ignored if 'Create a StorageClass' is set to 'false'." 
+ group: "HPE StorageClass and volume settings" +- variable: cv.config.existingCloudSubnet + show_if: "pluginType=cv" + label: "Cloud subnet" + type: string + default: "" + required: true + description: "Cloud subnet of the cluster for connection provisioning" + group: "Cloud instance settings" +- variable: cv.config.privateCloud + show_if: "pluginType=cv" + label: "Virtual private cloud" + type: string + required: true + description: "Virtual private cloud of the cluster" + group: "Cloud instance settings" +- variable: cv.config.region + show_if: "pluginType=cv" + label: "Public cloud region" + type: string + required: true + description: "Public cloud provider region in which cluster resides" + group: "Cloud instance settings" +- variable: cv.config.cloudComputeProvider + show_if: "pluginType=cv" + label: "Public cloud provider" + type: enum + default: "Amazon AWS" + options: + - "Amazon AWS" + - "Microsoft Azure" + description: "Public cloud provider name" + group: "Cloud instance settings" +- variable: cv.config.privateCloudResourceGroup + show_if: "cv.config.cloudComputeProvider=Microsoft Azure" + label: "Azure Resource Group" + type: string + required: true + description: "Azure resource group for the cluster" + group: "Cloud instance settings" +- variable: cv.config.volumeType + show_if: "pluginType=cv" + label: "Volume type" + type: enum + default: "PF" + options: + - "PF" + - "GPF" + description: "HPE Cloud Volume type" + group: "HPE Cloud Volumes settings" +- variable: cv.config.encryption + show_if: "pluginType=cv" + label: "Volume Encryption" + type: boolean + default: true + required: true + description: "Encryption for HPE Cloud Volume" + group: "HPE Cloud Volumes settings" +- variable: cv.config.protectionTemplate + show_if: "pluginType=cv" + label: "Protection template" + type: enum + default: "twicedaily:4" + options: + - "daily:3" + - "daily:7" + - "daily:14" + - "hourly:6" + - "hourly:12" + - "hourly:24" + - "twicedaily:4" + - "twicedaily:8" + - "twicedaily:14" + - "weekly:2" + - "weekly:4" + - "weekly:8" + - "monthly:3" + - "monthly:6" + - "monthly:12" + - "none" + description: "Protection Template" + group: "HPE Cloud Volumes settings" +- variable: cv.config.perfPolicy + show_if: "pluginType=cv" + label: "Performance policy" + type: enum + default: "Other" + options: + - "Other" + - "Exchange" + - "Oracle" + - "SharePoint" + - "SQL" + - "Windows File Server" + description: "Performance policy" + group: "HPE Cloud Volumes settings" diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/NOTES.txt b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/_helpers.tpl b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/_helpers.tpl new file mode 100644 index 000000000..d0a095b72 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hpe-flexvolume-driver.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "hpe-flexvolume-driver.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "hpe-flexvolume-driver.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-config.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-config.yaml new file mode 100644 index 000000000..c65a2a576 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-config.yaml @@ -0,0 +1,58 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: hpe-config + namespace: {{ .Release.Namespace }} +data: + volume-driver.json: |- + {{- if eq .Values.pluginType "nimble"}} + { + "global": {}, + "defaults": { + "limitIOPS": {{ .Values.nimble.config.limitIOPS | quote }}, + "limitMBPS": {{ .Values.nimble.config.limitMBPS | quote }}, + "perfPolicy": {{ .Values.nimble.config.perfPolicy | quote }} + }, + "overrides": {} + } + {{- else if eq .Values.pluginType "cv"}} + { + "global": { + "snapPrefix": {{ .Values.cv.config.snapPrefix | quote }}, + "automatedConnection": {{ .Values.cv.config.automatedConnection }}, + "initiators": [{{- join "," .Values.cv.config.initiators }}], + "automatedConnection": {{ .Values.cv.config.automatedConnection }}, + "existingCloudSubnet": {{ .Values.cv.config.existingCloudSubnet | quote }}, + "region": {{ .Values.cv.config.region | quote }}, + "privateCloud": {{ .Values.cv.config.privateCloud | quote }}, + {{- if and .Values.cv.config.privateCloudResourceGroup (ne .Values.cv.config.privateCloudResourceGroup "") }} + "privateCloudResourceGroup": {{ .Values.cv.config.privateCloudResourceGroup | quote }}, + {{- end }} + "cloudComputeProvider": {{ .Values.cv.config.cloudComputeProvider | quote }} + }, + "defaults": { + "perfPolicy": {{ .Values.cv.config.perfPolicy | quote }}, + "limitIOPS": {{ .Values.cv.config.limitIOPS | quote }}, + "volumeType": {{ .Values.cv.config.volumeType | quote }}, + "encryption": {{ .Values.cv.config.encryption }}, + "protectionTemplate": {{ .Values.cv.config.protectionTemplate | quote }}, + "destroyOnRm": {{ .Values.cv.config.destroyOnRm }} + }, + "overrides": {} + } + {{- else }} + { + "global": {}, + "defaults": {}, + "overrides": {} + } + {{- end }} + + {{- if eq .Values.flavor "rke"}} + {{ .Values.pluginType }}.json: |- + { + "dockerVolumePluginSocketPath": "/host/etc/hpe-storage/{{ .Values.pluginType }}.sock" + } + {{- end }} + diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd-rbac.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd-rbac.yaml new file mode 100644 index 000000000..788ed97ac --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd-rbac.yaml @@ -0,0 +1,40 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-dynamic-provisioner-role + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: 
["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-dynamic-provisioner-binding +subjects: + - kind: ServiceAccount + name: hpe-flexvolume-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-dynamic-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hpe-flexvolume-sa + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd.yaml new file mode 100644 index 000000000..eff543acc --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-doryd.yaml @@ -0,0 +1,67 @@ +#### HPE Dynamic Provisioner ### +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hpe-dynamic-provisioner + namespace: {{ .Release.Namespace }} + labels: + app: hpe-dynamic-provisioner +spec: + selector: + matchLabels: + daemon: hpe-dynamic-provisioner-daemon + strategy: + type: RollingUpdate + template: + metadata: + labels: + daemon: hpe-dynamic-provisioner-daemon + name: hpe-dynamic-provisioner + spec: + restartPolicy: Always + serviceAccountName: hpe-flexvolume-sa + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - + image: {{ .Values.dynamicProvisionerImage}}: {{- .Values.dynamicProvisionerTag}} + imagePullPolicy: Always + name: hpe-dynamic-provisioner + env: + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + volumeMounts: + - name: k8s + mountPath: /etc/kubernetes + - name: flexvolumedriver + mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + - name: varlog + mountPath: /var/log + {{- if eq .Values.flavor "rke"}} + - name: hpeconfig + mountPath: /host/etc/hpe-storage + {{- else }} + - name: hpeconfig + mountPath: /etc/hpe-storage + {{- end }} + securityContext: + privileged: true + volumes: + - name: k8s + hostPath: + path: /etc/kubernetes + - name: flexvolumedriver + hostPath: + {{- $flavor := .Values.flavor -}} + {{- range .Values.flexVolumeExec }} + {{- if eq .name $flavor }} + path: {{ .value }} + {{- end }} + {{- end }} + - name: hpeconfig + hostPath: + path: /etc/hpe-storage + - name: varlog + hostPath: + path: /var/log diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-flexvolume-plugin.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-flexvolume-plugin.yaml new file mode 100644 index 000000000..af86aae33 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-flexvolume-plugin.yaml @@ -0,0 +1,216 @@ + #### Flexvolume Driver ### +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hpe-flexvolume-driver + namespace: {{ .Release.Namespace }} + labels: + k8s-app: hpe-flexvolume-driver +spec: + selector: + matchLabels: + name: hpe-flexvolume-driver + template: + metadata: + labels: + name: hpe-flexvolume-driver + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: hpe-flexvolume-sa + containers: + - name: flexvolume + image: {{ 
.Values.flexVolumeDriverImage}}: {{- .Values.flexVolumeDriverTag}} + imagePullPolicy: "Always" + lifecycle: + preStop: + # create empty file to let plugin signal handler to perform cleanup of config/cert/dory files + exec: + command: [ "/bin/sh", "-c", "touch /etc/hpe-storage/remove" ] + {{- if eq .Values.flavor "rke"}} + postStart: + exec: + command: [ "/bin/bash", "-c", + "while [[ ! -d /var/lib/kubelet/volumeplugins/hpe.com~{{ .Values.pluginType }} ]] || [[ ! -f /etc/hpe-storage/{{ .Values.pluginType }}.json ]]; do sleep 1; done; cp -a /etc/hpe-storage/{{ .Values.pluginType }}.json /var/lib/kubelet/volumeplugins/hpe.com~{{ .Values.pluginType }}/{{ .Values.pluginType }}.json" ] + {{- end }} + env: + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + - name: FLAVOR + value: {{ .Values.flavor }} + - name: PROVIDER_IP + valueFrom: + secretKeyRef: + name: hpe-secret + key: backend + - name: PROVIDER_USERNAME + valueFrom: + secretKeyRef: + name: hpe-secret + key: username + - name: PROVIDER_PASSWORD + valueFrom: + secretKeyRef: + name: hpe-secret + key: password + - name: PROTOCOL + valueFrom: + secretKeyRef: + name: hpe-secret + key: protocol + {{- if eq .Values.pluginType "cv"}} + - name: PROVIDER_PORT + valueFrom: + secretKeyRef: + name: hpe-secret + key: servicePort + - name: PROVIDER_SERVICE + valueFrom: + secretKeyRef: + name: hpe-secret + key: serviceName + {{- end }} + - name: SCOPE + value: global + - name: PLUGIN_TYPE + value: {{ .Values.pluginType }} + volumeMounts: + - name: pluginmountdir + mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + - name: bindmountdir + mountPath: {{ .Values.podsMountDir }} + mountPropagation: Bidirectional + - name: legacymounts + mountPath: /opt/nimble + mountPropagation: Bidirectional + - name: dev + mountPath: /dev + - name: libmodules + mountPath: /lib/modules + - name: var-log + mountPath: /var/log + - name: var-lib-iscsi + mountPath: /var/lib/iscsi + - name: exec + mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + - name: runlock + mountPath: /run/lock + - name: etc-iscsi + mountPath: /etc/iscsi + - name: etc-multipath + mountPath: /etc/multipath + {{- if ne .Values.flavor "rke"}} + - name: etc-multipath-conf + mountPath: /etc/multipath.conf + {{- end }} + - name: etc-redhat-release + mountPath: /etc/redhat-release + - name: etc-os-release + mountPath: /etc/os-release + - name: etc-hpe-storage-dir + mountPath: /etc/hpe-storage + {{- if eq .Values.flavor "rke"}} + - name: etc-hpe-storage-dir + mountPath: /host/etc/hpe-storage + {{- end }} + - name: sys + mountPath: /sys + - name: iscsiadm + mountPath: /sbin/iscsiadm + - name: config-file + mountPath: /etc/hpe-storage/volume-driver.json + subPath: volume-driver.json + {{- if eq .Values.flavor "rke"}} + - name: config-file + mountPath: /etc/hpe-storage/{{ .Values.pluginType }}.json + subPath: {{ .Values.pluginType }}.json + {{- end }} + - name: runsystemd + mountPath: /run/systemd + - name: libsystemd + mountPath: /lib/systemd/system + - name: usrlocal + mountPath: /usr_local + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumes: + - name: pluginmountdir + hostPath: + path: /var/lib/kubelet + - name: bindmountdir + hostPath: + path: {{ .Values.podsMountDir }} + # required to handle legacy mounts from NLT based plugin. 
Remove this for CoreOS + - name: legacymounts + hostPath: + path: /opt/nimble/ + - name: dev + hostPath: + path: /dev + - name: libmodules + hostPath: + path: /lib/modules + - name: var-log + hostPath: + path: /var/log + - name: var-lib-iscsi + hostPath: + path: /var/lib/iscsi/ + - name: exec + hostPath: + {{- $flavor := .Values.flavor -}} + {{- range .Values.flexVolumeExec }} + {{- if eq .name $flavor }} + path: {{ .value }} + {{- end }} + {{- end }} + - name: runlock + hostPath: + path: /run/lock + - name: etc-iscsi + hostPath: + path: /etc/iscsi/ + - name: etc-multipath + hostPath: + path: /etc/multipath/ + {{- if ne .Values.flavor "rke"}} + - name: etc-multipath-conf + hostPath: + path: /etc/multipath.conf + type: FileOrCreate + {{- end }} + - name: etc-redhat-release + hostPath: + path: /etc/redhat-release + type: FileOrCreate + - name: etc-os-release + hostPath: + path: /etc/os-release + type: FileOrCreate + - name: etc-hpe-storage-dir + hostPath: + path: /etc/hpe-storage/ + - name: sys + hostPath: + path: /sys + - name: iscsiadm + hostPath: + path: /sbin/iscsiadm + type: FileOrCreate + - name: config-file + configMap: + name: hpe-config + - name: runsystemd + hostPath: + path: /run/systemd + - name: libsystemd + hostPath: + path: /lib/systemd/system + - name: usrlocal + hostPath: + path: /usr diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-secret.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-secret.yaml new file mode 100644 index 000000000..7aaee67e9 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpe-secret.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: hpe-secret + namespace: {{ .Release.Namespace }} +stringData: + username: {{ .Values.username }} + password: {{ .Values.password }} + {{- if eq .Values.pluginType "cv"}} + backend: {{ .Values.backend }} + servicePort: {{ .Values.servicePort | quote }} + serviceName: {{ .Values.serviceName }} + protocol: "iscsi" + {{- else }} + backend: {{ .Values.backend }} + protocol: {{ .Values.protocol }} + {{- end }} diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpecv-cp.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpecv-cp.yaml new file mode 100644 index 000000000..fd28355ee --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/hpecv-cp.yaml @@ -0,0 +1,66 @@ +{{- if eq .Values.pluginType "cv"}} +# Configuration to deploy the HPE Nimble Storage Container Provider service +# +# example usage: kubectl create -f + +--- + +####################################### +############ CP Service ############ +####################################### +kind: Service +apiVersion: v1 +metadata: + name: {{ .Values.serviceName }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.serviceName }} +spec: + ports: + - port: {{ .Values.servicePort }} + protocol: TCP + selector: + app: cv-cp + +--- + +########################################## +############ CP Deployment ############ +########################################## +kind: Deployment +apiVersion: apps/v1 +metadata: + name: cv-cp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: cv-cp + replicas: 1 + template: + metadata: + labels: + app: cv-cp + spec: + serviceAccountName: hpe-flexvolume-sa + containers: + - name: cv-cp + image: {{ .Values.containerProviderImage}}: {{- .Values.containerProviderTag}} + imagePullPolicy: Always + 
env: + - name: CLOUDVOLUMES_PORTAL_SERVER + value: {{ .Values.backend }} + - name: CLOUDVOLUMES_PORT + value: {{ .Values.servicePort | quote }} + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + ports: + - containerPort: {{ .Values.servicePort }} + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log +{{- end }} diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/post-install.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/post-install.yaml new file mode 100644 index 000000000..60dfa8559 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/post-install.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{.Release.Name}}" + labels: + app.kubernetes.io/managed-by: {{.Release.Service | quote}} + app.kubernetes.io/instance: {{.Release.Name | quote}} + app.kubernetes.io/version: {{.Chart.AppVersion | quote}} + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: "{{.Release.Name}}" + labels: + app.kubernetes.io/managed-by: {{.Release.Service | quote }} + app.kubernetes.io/instance: {{.Release.Name | quote }} + helm.sh/chart: "{{.Chart.Name}}-{{.Chart.Version}}" + spec: + restartPolicy: Never + containers: + - name: post-install-job + image: "alpine:3.3" + command: ["/bin/sleep","{{default "10" .Values.serviceWaitTime}}"] + diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/sc.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/sc.yaml new file mode 100644 index 000000000..3dc98eb48 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/templates/sc.yaml @@ -0,0 +1,16 @@ +{{ if .Values.storageClass.create -}} +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ .Values.storageClass.name }} + labels: + plugin: {{ .Release.Name }} + {{- if .Values.storageClass.defaultClass }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" + {{- end }} +provisioner: hpe.com/{{ .Values.pluginType }} +parameters: + description: {{ .Values.volumeDescription }} +{{- end }} diff --git a/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/values.yaml b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/values.yaml new file mode 100644 index 000000000..77d2762c2 --- /dev/null +++ b/charts/hpe-flexvolume-driver/hpe-flexvolume-driver/3.1.000/values.yaml @@ -0,0 +1,97 @@ +# Default values for hpe-flexvolume-driver +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +#doryd image +dynamicProvisionerTag: v3.1.0 +dynamicProvisionerImage: store/hpestorage/k8s-dynamic-provisioner + +#flexvolume plugin image +flexVolumeDriverTag: v3.1.0 +flexVolumeDriverImage: store/hpestorage/flexvolume-driver + +#container-provider image +containerProviderTag: v3.1.0 +containerProviderImage: store/hpestorage/cv-cp + +#parameters +backend: 192.168.1.1 +username: admin +password: admin +protocol: iscsi +servicePort: "8080" +serviceName: cv-cp-svc + +#storage class parameters +fsType: xfs +volumeDescription: "Volume created by HPE Volume Driver for Kubernetes FlexVolume Plugin" + +#service parameters +# wait seconds for doryd/flexvolume node plugins to start +serviceWaitTime: "10" + +#flavor +flavor: k8s + +#platform for which plugin is being deployed.i.e nimble or cv +pluginType: nimble + +#bindMountPath where kubelet bindmounts volume to pod namespace +podsMountDir: /var/lib/kubelet/pods + +#volumePluginDir volume plugin directory where kubelet watches for flexvolume plugin +flexVolumeExec: + - name: eks + value: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + - name: k8s + value: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + - name: ocp + value: /etc/kubernetes/kubelet-plugins/volume/exec + - name: gkeop + value: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + - name: gke + value: /home/kubernetes/flexvolume + - name: aks + value: /etc/kubernetes/volumeplugins + - name: rke + value: /var/lib/kubelet/volumeplugins + +#log level for flexvolume driver and dynamic provisioner +logLevel: info + +## For creating the StorageClass automatically: +storageClass: + create: true + + ## Set StorageClass as the default StorageClass + ## Ignored if storageClass.create is false + defaultClass: false + + ## Set a StorageClass name + ## Ignored if storageClass.create is false + name: hpe-standard + +nimble: + config: + limitIOPS: "-1" + limitMBPS: "-1" + perfPolicy: DockerDefault + +cv: + config: + snapPrefix: BaseFor + automatedConnection: true + existingCloudSubnet: 10.1.0.0/24 + region: us-east-1 + privateCloud: vpc-data + cloudComputeProvider: "Amazon AWS" + perfPolicy: Other + volumeType: PF + encryption: true + protectionTemplate: twicedaily:4 + destroyOnRm: true + limitIOPS: "1000" + # In case of multiple initiators, add one per line and escape double quotes as below + initiators: + - '"eth0"' + privateCloudResourceGroup: "" diff --git a/charts/instana-agent/instana-agent/1.0.2900/.helmignore b/charts/instana-agent/instana-agent/1.0.2900/.helmignore new file mode 100644 index 000000000..b4fa6cb0d --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# OWNERS file for helm +OWNERS diff --git a/charts/instana-agent/instana-agent/1.0.2900/Chart.yaml b/charts/instana-agent/instana-agent/1.0.2900/Chart.yaml new file mode 100644 index 000000000..6b52668cd --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/Chart.yaml @@ -0,0 +1,27 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: instana-agent +apiVersion: v1 +appVersion: "1.1" +description: Instana Agent for Kubernetes +home: https://www.instana.com/ +icon: https://instana-management-assets.s3-eu-west-1.amazonaws.com/stan-logo-2020.png +maintainers: +- email: jon.brisbin@instana.com + name: jbrisbin +- email: william.james@instana.com + name: wiggzz +- email: jeroen.soeters@instana.com + name: JeroenSoeters +- email: fabian.staeber@instana.com + name: fstab +- email: miel.donkers@instana.com + name: mdonkers +- email: dahlia.bock@instana.com + name: dlbock +- email: nathan.fisher@instana.com + name: nfisher +name: instana-agent +sources: +- https://github.com/instana/instana-agent-docker +version: 1.0.2900 diff --git a/charts/instana-agent/instana-agent/1.0.2900/README.md b/charts/instana-agent/instana-agent/1.0.2900/README.md new file mode 100644 index 000000000..dd17e51d6 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/README.md @@ -0,0 +1,164 @@ +# Instana + +Instana is an [APM solution](https://www.instana.com/) built for microservices that enables IT Ops to build applications faster and deliver higher quality services by automating monitoring, tracing and root cause analysis. This solution is optimized for [Kubernetes](https://www.instana.com/automatic-kubernetes-monitoring/). + +## Introduction + +This chart adds the Instana Agent to all schedulable nodes in your cluster via a `DaemonSet`. + +## Prerequisites + +Kubernetes 1.9.x - 1.18.x + +#### Helm 3 prerequisites + +Working `helm` with the `stable` repo added to your helm client. + +#### Helm 2 prerequisites + +Working `helm` and `tiller`. + +_Note:_ Tiller may need a service account and role binding if RBAC is enabled in your cluster. + +## Installing the Chart + +To configure the installation you can either specify the options on the command line using the **--set** switch, or you can edit **values.yaml**. Either way you should ensure that you set values for: + +* agent.key +* zone.name or cluster.name + +For most users, setting the `zone.name` is sufficient. However, if you would like to be able group your hosts based on the availability zone rather than cluster name, then you can specify the cluster name using the `cluster.name` instead of the `zone.name` setting. If you omit the `zone.name` the host zone will be automatically determined by the availability zone information on the host. 
+ +If you're in the EU, you'll probably also want to set the regional equivalent values for: + +* agent.endpointHost +* agent.endpointPort + +_Note:_ You can find the options mentioned in the [configuration section below](#configuration) + +Optionally, if your infrastructure uses a proxy, you should ensure that you set values for: + +* agent.pod.proxyHost +* agent.pod.proxyPort +* agent.pod.proxyProtocol +* agent.pod.proxyUser +* agent.pod.proxyPassword +* agent.pod.proxyUseDNS + +Optionally, if your infrastructure has multiple networks defined, you might need to allow the agent to listen on all addresses (typically with value set to '*'): + +* agent.listenAddress + +If your agent requires download key, you should ensure that you set values for it: + +* agent.downloadKey + +Agent can have APM, INFRASTRUCTURE or AWS mode. Default is APM and if you want to override that, ensure you set value: + +* agent.mode + +#### Installing with Helm 3 + +First, create a namespace for the instana-agent + +```bash +$ kubectl create namespace instana-agent +``` + +To install the chart with the release name `instana-agent` and set the values on the command line run: + +```bash +$ helm install instana-agent --namespace instana-agent \ +--set agent.key=INSTANA_AGENT_KEY \ +--set agent.endpointHost=HOST \ +--set zone.name=ZONE_NAME \ +stable/instana-agent +``` + +#### Installing with Helm 2 + +To install the chart with the release name `instana-agent` and set the values on the command line run: + +```bash +$ helm install --name instana-agent --namespace instana-agent \ +--set agent.key=INSTANA_AGENT_KEY \ +--set agent.endpointHost=HOST \ +--set zone.name=ZONE_NAME \ +stable/instana-agent +``` + +## Uninstalling the Chart + +To uninstall/delete the `instana-agent` release: + +#### Uninstalling with Helm 2 + +```bash +$ helm del --purge instana-agent +``` + +#### Uninstalling with Helm 3 + +```bash +$ helm del instana-agent -n instana-agent +``` + +## Configuration + +### Helm Chart + +The following table lists the configurable parameters of the Instana chart and their default values. + +| Parameter | Description | Default | +|------------------------------------|-------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| +| `agent.configuration_yaml` | Custom content for the agent configuration.yaml file | `nil` See [below](#agent) for more details | +| `agent.downloadKey` | Your Instana Download key | `nil` Usually not required | +| `agent.endpointHost` | Instana Agent backend endpoint host | `ingress-red-saas.instana.io` (US and ROW). 
If in Europe, please override with `ingress-blue-saas.instana.io` | +| `agent.endpointPort` | Instana Agent backend endpoint port | `443` | +| `agent.image.name` | The image name to pull | `instana/agent` | +| `agent.image.tag` | The image tag to pull | `latest` | +| `agent.image.pullPolicy` | Image pull policy | `Always` | +| `agent.key` | Your Instana Agent key | `nil` You must provide your own key | +| `agent.listenAddress` | List of addresses to listen on, or "*" for all interfaces | `nil` | +| `agent.mode` | Agent mode (Supported values are APM, INFRASTRUCTURE, AWS) | `APM` | +| `agent.pod.annotations` | Additional annotations to apply to the pod | `{}` | +| `agent.pod.limits.cpu` | Container cpu limits in cpu cores | `1.5` | +| `agent.pod.limits.memory` | Container memory limits in MiB | `512` | +| `agent.pod.proxyHost` | Hostname/address of a proxy | `nil` | +| `agent.pod.proxyPort` | Port of a proxy | `nil` | +| `agent.pod.proxyProtocol` | Proxy protocol (Supported proxy types are "http", "socks4", "socks5") | `nil` | +| `agent.pod.proxyUser` | Username of the proxy auth | `nil` | +| `agent.pod.proxyPassword` | Password of the proxy auth | `nil` | +| `agent.pod.proxyUseDNS` | Boolean if proxy also does DNS | `nil` | +| `agent.pod.requests.memory` | Container memory requests in MiB | `512` | +| `agent.pod.requests.cpu` | Container cpu requests in cpu cores | `0.5` | +| `agent.pod.tolerations` | Tolerations for pod assignment | `[]` | +| `agent.env` | Additional environment variables for the agent | `{}` | +| `agent.redactKubernetesSecrets` | Enable additional secrets redaction for selected Kubernetes resources | `nil` See [Kubernetes secrets](https://docs.instana.io/setup_and_manage/host_agent/on/kubernetes/#secrets) for more details. | +| `cluster.name` | Display name of the monitored cluster | Value of `zone.name` | +| `leaderElector.port` | Instana leader elector sidecar port | `42655` | +| `leaderElector.image.name` | The elector image name to pull | `instana/leader-elector` | +| `leaderElector.image.tag` | The elector image tag to pull | `0.5.4` | +| `podSecurityPolicy.enable` | Whether a PodSecurityPolicy should be authorized for the Instana Agent pods. Requires `rbac.create` to be `true` as well. | `false` See [PodSecurityPolicy](https://docs.instana.io/setup_and_manage/host_agent/on/kubernetes/#podsecuritypolicy) for more details. | +| `podSecurityPolicy.name` | Name of an _existing_ PodSecurityPolicy to authorize for the Instana Agent pods. If not provided and `podSecurityPolicy.enable` is `true`, a PodSecurityPolicy will be created for you. | `nil` | +| `rbac.create` | Whether RBAC resources should be created | `true` | +| `serviceAccount.create` | Whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | Name of the ServiceAccount to use | `instana-agent` | +| `zone.name` | Zone that detected technologies will be assigned to | `nil` You must provide either `zone.name` or `cluster.name`, see [above](#installing-the-chart) for details | + +#### Development and debugging options + +These options will be rarely used outside of development or debugging of the agent. 
+ +| Parameter                          | Description                                                               | Default                                                                                                       | +|------------------------------------|-------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| +| `agent.host.repository`            | Host path to mount as the agent maven repository                         | `nil`                                                                                                         | + +### Agent + +To configure the agent, you can either: + +- edit the [config map](templates/configmap.yaml), or +- provide the configuration via the `agent.configuration_yaml` parameter in [values.yaml](values.yaml) + +This configuration will be used for all Instana Agents on all nodes. Visit the [agent configuration documentation](https://docs.instana.io/setup_and_manage/host_agent/#agent-configuration-file) for more details on configuration options. diff --git a/charts/instana-agent/instana-agent/1.0.2900/app-readme.md b/charts/instana-agent/instana-agent/1.0.2900/app-readme.md new file mode 100644 index 000000000..0e26c8623 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/app-readme.md @@ -0,0 +1,5 @@ +# Instana + +Instana is an [APM solution](https://www.instana.com/) built for microservices that enables IT Ops to build applications faster and deliver higher quality services by automating monitoring, tracing and root cause analysis. This solution is optimized for [Rancher](https://www.instana.com/rancher/). + +This chart adds the Instana Agent to all schedulable nodes in your cluster via a `DaemonSet`. diff --git a/charts/instana-agent/instana-agent/1.0.2900/questions.yml b/charts/instana-agent/instana-agent/1.0.2900/questions.yml new file mode 100644 index 000000000..bf9d1a46d --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/questions.yml @@ -0,0 +1,236 @@ +questions: +# Basic agent configuration +- variable: agent.key + label: agent.key + description: "Your Instana Agent key is the secret token which your agent uses to authenticate to Instana's servers" + type: string + required: true + group: "Agent Configuration" +- variable: agent.endpointHost + label: agent.endpointHost + description: "The hostname of the Instana server your agents will connect to. Defaults to ingress-red-saas.instana.io for US and ROW. If in Europe, please use ingress-blue-saas.instana.io" + type: string + required: true + default: "ingress-red-saas.instana.io" + group: "Agent Configuration" +- variable: zone.name + label: zone.name + description: "Custom zone that detected technologies will be assigned to" + type: string + required: true + group: "Agent Configuration" +# Advanced agent configuration +- variable: advancedAgentConfiguration + description: "Show advanced configuration for the Instana Agent" + label: Show advanced configuration + type: boolean + default: false + show_subquestion_if: true + group: "Advanced Agent Configuration" + subquestions: + - variable: agent.configuration_yaml + label: agent.configuration_yaml (Optional) + description: "Custom content for the agent configuration.yaml file in YAML format. Please use the 'Edit as YAML' feature in the Rancher UI for the best editing experience."
+ type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.downloadKey + label: agent.downloadKey (Optional) + description: "Your Instana download key" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.endpointPort + label: agent.endpointPort + description: "The Agent backend port number (as a string) of the Instana server your agents will connect to" + type: string + required: true + default: "443" + group: "Advanced Agent Configuration" + - variable: agent.image.name + label: agent.image.name + description: "The name of the Instana Agent container image" + type: string + required: true + default: "instana/agent" + group: "Advanced Agent Configuration" + - variable: agent.image.tag + label: agent.image.tag + description: "The tag name of the Instana Agent container image" + type: string + required: true + default: "latest" + group: "Advanced Agent Configuration" + - variable: agent.image.pullPolicy + label: agent.image.pullPolicy + description: "Specifies when to pull the Instana Agent image container" + type: string + required: true + default: "Always" + group: "Advanced Agent Configuration" + - variable: agent.listenAddress + label: agent.listenAddress (Optional) + description: "The IP address the agent HTTP server will listen to, or '*' for all interfaces" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.mode + label: agent.mode (Optional) + description: "Agent mode. Possible options are: APM, INFRASTRUCTURE or AWS" + type: enum + options: + - "APM" + - "INFRASTRUCTURE" + - "AWS" + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.annotations + label: agent.pod.annotations (Optional) + description: "Additional annotations to be added to the agent pods in YAML format. Please use the 'Edit as YAML' feature in the Rancher UI for the best editing experience." + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.limits.cpu + label: agent.pod.limits.cpu + description: "CPU units allocation limits for the agent pods" + type: string + required: true + default: "1.5" + group: "Advanced Agent Configuration" + - variable: agent.pod.limits.memory + label: agent.pod.limits.memory + description: "Memory allocation limits in MiB for the agent pods" + type: int + required: true + default: 512 + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyHost + label: agent.pod.proxyHost (Optional) + description: "Hostname/address of a proxy. Sets the INSTANA_AGENT_PROXY_HOST environment variable" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyPort + label: agent.pod.proxyPort (Optional) + description: "Port of a proxy. Sets the INSTANA_AGENT_PROXY_PORT environment variable" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyProtocol + label: agent.pod.proxyProtocol (Optional) + description: "Proxy protocol. Sets the INSTANA_AGENT_PROXY_PROTOCOL environment variable. Supported proxy types are http, socks4, socks5" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyUser + label: agent.pod.proxyUser (Optional) + description: "Username of the proxy auth. 
Sets the INSTANA_AGENT_PROXY_USER environment variable" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyPassword + label: agent.pod.proxyPassword (Optional) + description: "Password of the proxy auth. Sets the INSTANA_AGENT_PROXY_PASSWORD environment variable" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.proxyUseDNS + label: agent.pod.proxyUseDNS. (Optional) + description: "Boolean if proxy also does DNS. Sets the INSTANA_AGENT_PROXY_USE_DNS environment variable" + type: enum + options: + - "true" + - "false" + required: false + group: "Advanced Agent Configuration" + - variable: agent.pod.requests.cpu + label: agent.pod.requests.cpu + description: "Requested CPU units allocation for the agent pods" + type: string + required: true + default: "0.5" + group: "Advanced Agent Configuration" + - variable: agent.pod.requests.memory + label: agent.pod.requests.memory + description: "Requested memory allocation in MiB for the agent pods" + type: int + required: true + default: 512 + group: "Advanced Agent Configuration" + - variable: agent.pod.tolerations + label: agent.pod.tolerations (Optional) + description: "Tolerations to influence agent pod assignment in YAML format. Please use the 'Edit as YAML' feature in the Rancher UI for the best editing experience." + type: string + required: false + group: "Advanced Agent Configuration" + - variable: agent.redactKubernetesSecrets + label: agent.redactKubernetesSecrets (Optional) + description: "Enable additional secrets redaction for selected Kubernetes resources" + type: boolean + required: false + default: false + group: "Advanced Agent Configuration" + - variable: cluster.name + label: cluster.name (Optional) + description: "The name that will be assigned to this cluster in Instana. See the 'Installing the Chart' section in the 'Detailed Descriptions' tab for more details" + type: string + required: false + group: "Advanced Agent Configuration" + - variable: leaderElector.image.name + label: leaderElector.image.name + description: "The name of the leader elector container image" + type: string + required: true + default: "instana/leader-elector" + group: "Advanced Agent Configuration" + - variable: leaderElector.image.tag + label: leaderElector.image.tag + description: "The tag name of the leader elector container image" + type: string + required: true + default: "0.5.4" + group: "Advanced Agent Configuration" + - variable: leaderElector.port + label: leaderElector.port + description: "The port on which the leader elector sidecar is exposed" + type: int + required: true + default: 42655 + group: "Advanced Agent Configuration" +- variable: podSecurityPolicy.enable + label: podSecurityPolicy.enable (Optional) + description: "Specifies whether a PodSecurityPolicy should be authorized for the Instana Agent pods. Requires `rbac.create` to also be `true`" + type: boolean + show_if: "rbac.create=true" + required: false + default: false + group: "Pod Security Policy Configuration" +- variable: podSecurityPolicy.name + label: podSecurityPolicy.name (Optional) + description: "The name of an existing PodSecurityPolicy you would like to authorize for the Instana Agent pods. 
If not set and `podSecurityPolicy.enable` is `true`, a PodSecurityPolicy will be created with a name generated using the fullname template" + type: string + show_if: "rbac.create=true&&podSecurityPolicy.enable=true" + required: false + group: "Pod Security Policy Configuration" +- variable: rbac.create + label: rbac.create + description: "Specifies whether RBAC resources should be created" + type: boolean + required: true + default: true + group: "RBAC Configuration" +- variable: serviceAccount.create + label: serviceAccount.create + description: "Specifies whether a ServiceAccount should be created" + type: boolean + required: true + default: true + show_subquestion_if: true + group: "RBAC Configuration" + subquestions: + - variable: serviceAccount.name + label: Name of the ServiceAccount (Optional) + description: "The name of the ServiceAccount to use. If not set and `serviceAccount.create` is true, a name is generated using the fullname template." + type: string + required: false + group: "RBAC Configuration" diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/NOTES.txt b/charts/instana-agent/instana-agent/1.0.2900/templates/NOTES.txt new file mode 100644 index 000000000..8e05968f1 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/NOTES.txt @@ -0,0 +1,66 @@ +{{- if (and (not .Values.agent.key) (and (not .Values.zone.name) (not .Values.cluster.name))) }} +############################################################################## +#### ERROR: You did not specify your secret agent key. #### +#### ERROR: You also did not specify a zone or name for this cluster. #### +############################################################################## + +This agent deployment will be incomplete until you set your agent key and zone or name for this cluster: + + helm upgrade {{ .Release.Name }} --reuse-values \ + --set agent.key=$(YOUR_SECRET_AGENT_KEY) \ + --set zone.name=$(YOUR_ZONE_NAME) stable/instana-agent + +Alternatively, you may specify a cluster name and the zone will be detected from availability zone information on the host: + + helm upgrade {{ .Release.Name }} --reuse-values \ + --set agent.key=$(YOUR_SECRET_AGENT_KEY) \ + --set cluster.name=$(YOUR_CLUSTER_NAME) stable/instana-agent + +- YOUR_SECRET_AGENT_KEY can be obtained from the Management Portal section of your Instana installation. +- YOUR_ZONE_NAME should be the zone that detected technologies will be assigned to. +- YOUR_CLUSTER_NAME should be the custom name of your cluster. + +At least one of zone.name or cluster.name is required. This cluster will be reported with the name of the zone unless you specify a cluster name. + +{{- else if (and (not .Values.zone.name) (not .Values.cluster.name)) }} +############################################################################## +#### ERROR: You did not specify a zone or name for this cluster. #### +############################################################################## + +This agent deployment will be incomplete until you set a zone for this cluster: + + helm upgrade {{ .Release.Name }} --reuse-values \ + --set zone.name=$(YOUR_ZONE_NAME) stable/instana-agent + +Alternatively, you may specify a cluster name and the zone will be detected from availability zone information on the host: + + helm upgrade {{ .Release.Name }} --reuse-values \ + --set cluster.name=$(YOUR_CLUSTER_NAME) stable/instana-agent + +- YOUR_ZONE_NAME should be the zone that detected technologies will be assigned to. 
+- YOUR_CLUSTER_NAME should be the custom name of your cluster. + +At least one of zone.name or cluster.name is required. This cluster will be reported with the name of the zone unless you specify a cluster name. + +{{- else if not .Values.agent.key }} +############################################################################## +#### ERROR: You did not specify your secret agent key. #### +############################################################################## + +This agent deployment will be incomplete until you set your agent key: + + helm upgrade {{ .Release.Name }} --reuse-values \ + --set agent.key=$(YOUR_SECRET_AGENT_KEY) stable/instana-agent + +- YOUR_SECRET_AGENT_KEY can be obtained from the Management Portal section of your Instana installation. + +{{- else -}} +It may take a few moments for the agents to fully deploy. You can see what agents are running by listing resources in the {{ .Release.Namespace }} namespace: + + kubectl get all -n {{ .Release.Namespace }} + +You can get the logs for all of the agents with `kubectl logs`: + + kubectl logs -l app.kubernetes.io/name={{ .Release.Name }} -n {{ .Release.Namespace }} -c instana-agent + +{{- end }} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/_helpers.tpl b/charts/instana-agent/instana-agent/1.0.2900/templates/_helpers.tpl new file mode 100644 index 000000000..674e730f2 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "instana-agent.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "instana-agent.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "instana-agent.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +The name of the ServiceAccount used. +*/}} +{{- define "instana-agent.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "instana-agent.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +The name of the PodSecurityPolicy used. +*/}} +{{- define "instana-agent.podSecurityPolicyName" -}} +{{- if .Values.podSecurityPolicy.enable -}} +{{ default (include "instana-agent.fullname" .) .Values.podSecurityPolicy.name }} +{{- end -}} +{{- end -}} + +{{/* +Add Helm metadata to resource labels. +*/}} +{{- define "instana-agent.commonLabels" -}} +app.kubernetes.io/name: {{ include "instana-agent.name" . 
}} +{{ if .Values.templating -}} +app.kubernetes.io/version: {{ .Chart.Version }} +{{- else -}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ include "instana-agent.chart" . }} +{{- end -}} +{{- end -}} + +{{/* +Add Helm metadata to selector labels specifically for deployments/daemonsets/statefulsets. +*/}} +{{- define "instana-agent.selectorLabels" -}} +app.kubernetes.io/name: {{ include "instana-agent.name" . }} +{{ if not .Values.templating -}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/agentsecret.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/agentsecret.yaml new file mode 100644 index 000000000..2e378276c --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/agentsecret.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.agent.key .Values.agent.downloadKey }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "instana-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +type: Opaque +data: +{{- if .Values.templating }} + key: {{ .Values.agent.key }} +{{- else }} + key: {{ .Values.agent.key | b64enc | quote }} +{{- end }} +{{- if .Values.agent.downloadKey }} + downloadKey: {{ .Values.agent.downloadKey | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrole.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrole.yaml new file mode 100644 index 000000000..91b3ad026 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrole.yaml @@ -0,0 +1,68 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRole +{{- if .Values.agent.supportOpenshift }} +apiVersion: v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- end }} +metadata: + name: {{ template "instana-agent.fullname" . }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +rules: +- nonResourceURLs: + - "/version" + - "/healthz" + verbs: ["get"] +- apiGroups: ["batch"] + resources: + - "jobs" + - "cronjobs" + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: + - "deployments" + - "replicasets" + - "ingresses" + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] + resources: + - "deployments" + - "replicasets" + - "daemonsets" + - "statefulsets" + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - "namespaces" + - "events" + - "services" + - "endpoints" + - "nodes" + - "pods" + - "replicationcontrollers" + - "componentstatuses" + - "resourcequotas" + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - "endpoints" + verbs: ["create", "update", "patch"] +- apiGroups: ["networking.k8s.io"] + resources: + - "ingresses" + verbs: ["get", "list", "watch"] +{{- if .Values.agent.supportOpenshift }} +- apiGroups: ["apps.openshift.io"] + resources: + - "deploymentconfigs" + verbs: ["get", "list", "watch"] +{{- end -}} +{{- if .Values.podSecurityPolicy.enable}} +- apiGroups: ["policy"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "instana-agent.podSecurityPolicyName" . 
}} +{{- end -}} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrolebinding.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..4d8ab6adb --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/clusterrolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRoleBinding +{{- if .Values.agent.supportOpenshift }} +apiVersion: v1 +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +{{- end }} +metadata: + name: {{ template "instana-agent.fullname" . }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +subjects: +- kind: ServiceAccount + name: {{ template "instana-agent.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "instana-agent.fullname" . }} + {{- if not .Values.agent.supportOpenshift }} + apiGroup: rbac.authorization.k8s.io + {{- end -}} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/configmap.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/configmap.yaml new file mode 100644 index 000000000..11334323d --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/configmap.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "instana-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +data: + configuration.yaml: | + # Manual a-priori configuration. Configuration will be only used when the sensor + # is actually installed by the agent. + # The commented out example values represent example configuration and are not + # necessarily defaults. Defaults are usually 'absent' or mentioned separately. + # Changes are hot reloaded unless otherwise mentioned. + + # It is possible to create files called 'configuration-abc.yaml' which are + # merged with this file in file system order. So 'configuration-cde.yaml' comes + # after 'configuration-abc.yaml'. Only nested structures are merged, values are + # overwritten by subsequent configurations. + + # Secrets + # To filter sensitive data from collection by the agent, all sensors respect + # the following secrets configuration. If a key collected by a sensor matches + # an entry from the list, the value is redacted. + #com.instana.secrets: + # matcher: 'contains-ignore-case' # 'contains-ignore-case', 'contains', 'regex' + # list: + # - 'key' + # - 'password' + # - 'secret' + + # Host + #com.instana.plugin.host: + # tags: + # - 'dev' + # - 'app1' + + # Hardware & Zone + #com.instana.plugin.generic.hardware: + # enabled: true # disabled by default + # availability-zone: 'zone' + {{- if .Values.agent.configuration_yaml -}} + {{ .Values.agent.configuration_yaml | nindent 4 }} + {{- end }} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/daemonset.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/daemonset.yaml new file mode 100644 index 000000000..7918ec7a1 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/daemonset.yaml @@ -0,0 +1,210 @@ +{{- if .Values.agent.key -}} +{{- if or .Values.zone.name .Values.cluster.name -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "instana-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "instana-agent.selectorLabels" . 
| nindent 6 }} + template: + metadata: + labels: + {{- include "instana-agent.commonLabels" . | nindent 8 }} + {{- if .Values.agent.pod.annotations }} + annotations: + {{- toYaml .Values.agent.pod.annotations | nindent 8 }} + {{- end }} + spec: + {{- if .Values.agent.pod.nodeSelector }} + nodeSelector: + {{- range $key, $value := .Values.agent.pod.nodeSelector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "instana-agent.serviceAccountName" . }} + hostIPC: true + hostNetwork: true + hostPID: true + containers: + - name: instana-agent + image: "{{ .Values.agent.image.name }}:{{ .Values.agent.image.tag }}" + imagePullPolicy: {{ .Values.agent.image.pullPolicy }} + env: + - name: INSTANA_AGENT_LEADER_ELECTOR_PORT + value: {{ .Values.leaderElector.port | quote }} + - name: INSTANA_ZONE + value: {{ .Values.zone.name | quote }} + {{- if .Values.cluster.name }} + - name: INSTANA_KUBERNETES_CLUSTER_NAME + value: {{ .Values.cluster.name | quote }} + {{- end }} + - name: INSTANA_AGENT_ENDPOINT + value: {{ .Values.agent.endpointHost | quote }} + - name: INSTANA_AGENT_ENDPOINT_PORT + value: {{ .Values.agent.endpointPort | quote }} + - name: INSTANA_AGENT_KEY + valueFrom: + secretKeyRef: + name: {{ template "instana-agent.fullname" . }} + key: key + {{- if .Values.agent.mode }} + - name: INSTANA_AGENT_MODE + value: {{ .Values.agent.mode | quote }} + {{- end }} + {{- if .Values.agent.downloadKey }} + - name: INSTANA_DOWNLOAD_KEY + valueFrom: + secretKeyRef: + name: {{ template "instana-agent.fullname" . }} + key: downloadKey + {{- end }} + {{- if .Values.agent.proxyHost }} + - name: INSTANA_AGENT_PROXY_HOST + value: {{ .Values.agent.proxyHost | quote }} + {{- end }} + {{- if .Values.agent.proxyPort }} + - name: INSTANA_AGENT_PROXY_PORT + value: {{ .Values.agent.proxyPort | quote }} + {{- end }} + {{- if .Values.agent.proxyProtocol }} + - name: INSTANA_AGENT_PROXY_PROTOCOL + value: {{ .Values.agent.proxyProtocol | quote }} + {{- end }} + {{- if .Values.agent.proxyUser }} + - name: INSTANA_AGENT_PROXY_USER + value: {{ .Values.agent.proxyUser | quote }} + {{- end }} + {{- if .Values.agent.proxyPassword }} + - name: INSTANA_AGENT_PROXY_PASSWORD + value: {{ .Values.agent.proxyPassword | quote }} + {{- end }} + {{- if .Values.agent.proxyUseDNS }} + - name: INSTANA_AGENT_PROXY_USE_DNS + value: {{ .Values.agent.proxyUseDNS | quote }} + {{- end }} + {{- if .Values.agent.listenAddress }} + - name: INSTANA_AGENT_HTTP_LISTEN + value: {{ .Values.agent.listenAddress | quote }} + {{- end }} + {{- if .Values.agent.redactKubernetesSecrets }} + - name: INSTANA_KUBERNETES_REDACT_SECRETS + value: {{ .Values.agent.redactKubernetesSecrets | quote }} + {{- end }} + - name: JAVA_OPTS + # Approximately 1/3 of container memory requests to allow for direct-buffer memory usage and JVM overhead + value: "-Xmx{{ div (default 512 .Values.agent.pod.requests.memory) 3 }}M -XX:+ExitOnOutOfMemoryError" + - name: INSTANA_AGENT_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- range $key, $value := .Values.agent.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + securityContext: + privileged: true + volumeMounts: + - name: dev + mountPath: /dev + - name: run + mountPath: /run + - name: var-run + mountPath: /var/run + - name: sys + mountPath: /sys + - name: var-log + mountPath: /var/log + - name: var-lib + mountPath: /var/lib/containers/storage + - name: machine-id + mountPath: 
/etc/machine-id + - name: configuration + subPath: configuration.yaml + mountPath: /root/configuration.yaml + {{- if .Values.agent.host.repository }} + - name: repo + mountPath: /opt/instana/agent/data/repo + {{- end }} + livenessProbe: + httpGet: + path: /status + port: 42699 + initialDelaySeconds: 300 + timeoutSeconds: 3 + resources: + requests: + memory: "{{ default 512 .Values.agent.pod.requests.memory }}Mi" + cpu: {{ default 0.5 .Values.agent.pod.requests.cpu }} + limits: + memory: "{{ default 512 .Values.agent.pod.limits.memory }}Mi" + cpu: {{ default 1.5 .Values.agent.pod.limits.cpu }} + ports: + - containerPort: 42699 + - name: instana-agent-leader-elector + image: "{{ .Values.leaderElector.image.name }}:{{ .Values.leaderElector.image.tag }}" + env: + - name: INSTANA_AGENT_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + command: + - "/busybox/sh" + - "-c" + - "sleep 12 && /app/server --election=instana --http=localhost:{{ .Values.leaderElector.port }} --id=$(INSTANA_AGENT_POD_NAME)" + resources: + requests: + cpu: 0.1 + memory: 64Mi + livenessProbe: + httpGet: # Leader elector liveness is tied to agent, published on localhost:42699 + path: /com.instana.agent.coordination.sidecar/health + port: 42699 + initialDelaySeconds: 300 + timeoutSeconds: 3 + ports: + - containerPort: {{ .Values.leaderElector.port }} + {{- if .Values.agent.pod.tolerations }} + tolerations: + {{- toYaml .Values.agent.pod.tolerations | nindent 8 }} + {{- end }} + volumes: + - name: dev + hostPath: + path: /dev + - name: run + hostPath: + path: /run + - name: var-run + hostPath: + path: /var/run + - name: sys + hostPath: + path: /sys + - name: var-log + hostPath: + path: /var/log + - name: var-lib + hostPath: + path: /var/lib/containers/storage + - name: machine-id + hostPath: + path: /etc/machine-id + - name: configuration + configMap: + name: {{ template "instana-agent.fullname" . }} + {{- if .Values.agent.host.repository }} + - name: repo + hostPath: + path: {{ .Values.agent.host.repository }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/namespace.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/namespace.yaml new file mode 100644 index 000000000..98f3d61b3 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/namespace.yaml @@ -0,0 +1,8 @@ +{{- if .Values.templating }} +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/podsecuritypolicy.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..6ade65a5f --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/podsecuritypolicy.yaml @@ -0,0 +1,55 @@ +{{- if .Values.rbac.create -}} +{{- if (and .Values.podSecurityPolicy.enable (not .Values.podSecurityPolicy.name)) -}} +kind: PodSecurityPolicy +apiVersion: policy/v1beta1 +metadata: + name: {{ template "instana-agent.podSecurityPolicyName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . 
| nindent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + volumes: + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - secret + - projected + - hostPath + allowedHostPaths: + - pathPrefix: "/dev" + readOnly: false + - pathPrefix: "/run" + readOnly: false + - pathPrefix: "/var/run" + readOnly: false + - pathPrefix: "/sys" + readOnly: false + - pathPrefix: "/var/log" + readOnly: false + - pathPrefix: "/etc/machine-id" + readOnly: false + - pathPrefix: "/var/lib/containers/storage" + readOnly: false + {{- if .Values.agent.host.repository }} + - pathPrefix: {{ .Values.agent.host.repository }} + readOnly: false + {{- end }} + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: "RunAsAny" + seLinux: + rule: "RunAsAny" + supplementalGroups: + rule: "RunAsAny" + fsGroup: + rule: "RunAsAny" +{{- end -}} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/templates/serviceaccount.yaml b/charts/instana-agent/instana-agent/1.0.2900/templates/serviceaccount.yaml new file mode 100644 index 000000000..0a0b52f8a --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "instana-agent.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "instana-agent.commonLabels" . | nindent 4 }} +{{- end -}} diff --git a/charts/instana-agent/instana-agent/1.0.2900/values.yaml b/charts/instana-agent/instana-agent/1.0.2900/values.yaml new file mode 100644 index 000000000..3b23ad868 --- /dev/null +++ b/charts/instana-agent/instana-agent/1.0.2900/values.yaml @@ -0,0 +1,111 @@ +# name is the value which will be used as the base resource name for various resources associated with the agent. +# name: instana-agent + +agent: + # agent.key is the secret token which your agent uses to authenticate to Instana's servers. + key: null + # agent.mode is used to set agent mode and it can be APM, INFRASTRUCTURE or AWS + # mode: APM + # agent.downloadKey is optional, if used it doesn't have to match agent.key + # downloadKey: null + # agent.listenAddress is the IP address the agent HTTP server will listen to. + # listenAddress: * + + # agent.endpointHost is the hostname of the Instana server your agents will connect to. + endpointHost: ingress-red-saas.instana.io + # agent.endpointPort is the port number (as a String) of the Instana server your agents will connect to. + endpointPort: 443 + + image: + # agent.image.name is the name of the container image of the Instana agent. + name: instana/agent + # agent.image.tag is the tag name of the agent container image. + tag: latest + # agent.image.pullPolicy specifies when to pull the image container. + pullPolicy: Always + + pod: + # agent.pod.annotations are additional annotations to be added to the agent pods. + annotations: {} + + # agent.pod.tolerations are tolerations to influence agent pod assignment. + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + requests: + # agent.pod.requests.memory is the requested memory allocation in MiB for the agent pods. + memory: 512 + # agent.pod.requests.cpu are the requested CPU units allocation for the agent pods. + cpu: 0.5 + limits: + # agent.pod.limits.memory set the memory allocation limits in MiB for the agent pods. 
+ memory: 512 + # agent.pod.limits.cpu sets the CPU units allocation limits for the agent pods. + cpu: 1.5 + + # agent.proxyHost sets the INSTANA_AGENT_PROXY_HOST environment variable. + # proxyHost: null + # agent.proxyPort sets the INSTANA_AGENT_PROXY_PORT environment variable. + # proxyPort: null + # agent.proxyProtocol sets the INSTANA_AGENT_PROXY_PROTOCOL environment variable. + # proxyProtocol: null + # agent.proxyUser sets the INSTANA_AGENT_PROXY_USER environment variable. + # proxyUser: null + # agent.proxyPassword sets the INSTANA_AGENT_PROXY_PASSWORD environment variable. + # proxyPassword: null + # agent.proxyUseDNS sets the INSTANA_AGENT_PROXY_USE_DNS environment variable. + # proxyUseDNS: null + + # use this to set additional environment variables for the instana agent + # for example: + # env: + # INSTANA_AGENT_TAGS: dev + env: {} + + configuration_yaml: | + # Place agent configuration here + + # agent.redactKubernetesSecrets sets the INSTANA_KUBERNETES_REDACT_SECRETS environment variable. + # redactKubernetesSecrets: null + + # agent.host.repository sets a host path to be mounted as the agent maven repository (for debugging or development purposes) + host: + repository: null + + # agent.supportOpenshift specifies whether the cluster role should include openshift permissions + # supportOpenshift: true + +cluster: + # cluster.name represents the name that will be assigned to this cluster in Instana + name: null + +leaderElector: + image: + # leaderElector.image.name is the name of the container image of the leader elector. + name: instana/leader-elector + # leaderElector.image.tag is the tag name of the agent container image. + tag: 0.5.4 + port: 42655 + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and `create` is true, a name is generated using the fullname template + # name: instana-agent + +podSecurityPolicy: + # Specifies whether a PodSecurityPolicy should be authorized for the Instana Agent pods. + # Requires `rbac.create` to be `true` as well. + enable: false + # The name of an existing PodSecurityPolicy you would like to authorize for the Instana Agent pods. + # If not set and `enable` is true, a PodSecurityPolicy will be created with a name generated using the fullname template. + name: null + +zone: + # zone.name is the custom zone that detected technologies will be assigned to + name: null diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/.helmignore b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/.helmignore new file mode 100644 index 000000000..be86b789d --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# Helm files +OWNERS diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/Chart.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/Chart.yaml new file mode 100644 index 000000000..21103e362 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/Chart.yaml @@ -0,0 +1,17 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: TrilioVault for Kubernetes Operator + catalog.cattle.io/release-name: k8s-triliovault-operator +apiVersion: v1 +appVersion: v2.0.5 +description: K8s-TrilioVault-Operator is an operator designed to manage the K8s-TrilioVault + Application Lifecycle. +home: https://github.com/trilioData/k8s-triliovault-operator +icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png +maintainers: +- email: prafull.ladha@trilio.io + name: prafull11 +name: k8s-triliovault-operator +sources: +- https://github.com/trilioData/k8s-triliovault-operator +version: 2.0.500 diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/LICENSE b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/LICENSE new file mode 100644 index 000000000..76b559d3b --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/LICENSE @@ -0,0 +1 @@ +# Placeholder for the License if we decide to provide one diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/README.md b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/README.md new file mode 100644 index 000000000..600ce8dfd --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/README.md @@ -0,0 +1,41 @@ +# K8s-TrilioVault-Operator +This operator is to manage the lifecycle of TrilioVault Backup/Recovery solution. This operator install, updates and manage the TrilioVault application. + +## Introduction + +## Prerequisites + +- Kubernetes 1.13+ +- Alpha feature gates should be enabled +- PV provisioner support +- CSI driver should be installed + +## Installation + +To install the chart with the operator name `trilio`: + +```bash +# For helm version 2 +helm install --name trilio k8s-triliovault-operator + +# For helm version 3 +helm install --name-template trilio k8s-triliovault-operator +``` + +The command deploys the K8s-triliovault-operator with the default configuration. + +## Uninstall + +To uninstall/delete the chart `trilio` : + +```bash +# For helm version 2 +helm delete trilio --purge + +# For helm version 3 +helm uninstall trilio +``` + +## Configuration + +TODO: Add possible configuration in helm chart. diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/app-readme.md b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/app-readme.md new file mode 100644 index 000000000..65a2b3495 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/app-readme.md @@ -0,0 +1,37 @@ +# TrilioVault for Kubernetes + +[K8s-TrilioVault-Operator](https://trilio.io) is an operator designed to manage +the K8s-TrilioVault Application Lifecycle. + +This operator is to manage the lifecycle of TrilioVault Backup/Recovery solution. This operator install, updates and manage the TrilioVault application. 
+ +Introduction: + +Prerequisites: + +Kubernetes 1.17+ +Alpha feature gates should be enabled +PV provisioner support +CSI driver should be installed + +Installation: + +To install the chart with the operator name trilio: + +helm install k8s-triliovault-operator triliovault-operator/k8s-triliovault-operator + +# For helm version 3 + +helm install triliovault-operator triliovault-operator/k8s-triliovault-operator + +The command deploys the Triliovault for Kubernetes Operator with the default configuration. + +Uninstall: + +To uninstall/delete the chart trilio : + +# For helm version 3 +helm uninstall k8s-triliovault-operator + +For more information around TVM manager installation, please follow below link: +https://docs.trilio.io/kubernetes/use-triliovault/installing-triliovault diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/NOTES.txt b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/NOTES.txt new file mode 100644 index 000000000..19cd282d3 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that TrilioVault Operator has started, run: + + kubectl --namespace={{ .Release.Namespace }} get deployments -l "release={{ .Release.Name }}" \ No newline at end of file diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/_helpers.tpl b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/_helpers.tpl new file mode 100644 index 000000000..7cea76a18 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/_helpers.tpl @@ -0,0 +1,33 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-triliovault-operator.name" -}} +{{- default .Release.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "k8s-triliovault-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper TrilioVault Operator image name +*/}} +{{- define "k8s-triliovault-operator.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole.yaml new file mode 100644 index 000000000..47e151e09 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole.yaml @@ -0,0 +1,53 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{template "k8s-triliovault-operator.name" .}}-{{.Release.Namespace}}-manager-role +rules: +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaulthpas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaulthpas/status + verbs: + - get + - patch + - update +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaultmanagers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaultmanagers/status + verbs: + - get + - patch + - update +- apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole_binding.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole_binding.yaml new file mode 100644 index 000000000..6b69f7acf --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/clusterrole_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "k8s-triliovault-operator.name" . }}-{{ .Release.Namespace }}-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-triliovault-operator.name" . }}-{{ .Release.Namespace }}-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ .Release.Namespace }} diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/deployment.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/deployment.yaml new file mode 100644 index 000000000..c0a850409 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: k8s-triliovault-operator + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "k8s-triliovault-operator.fullname" . 
}} + release: "{{ .Release.Name }}" + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "k8s-triliovault-operator.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + spec: + containers: + - name: k8s-triliovault-operator + image: {{ .Values.registry }}/{{ index .Values "k8s-triliovault-operator" "repository" }}:{{ .Values.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: TVK_ENV + value: {{ .Values.tvkEnv }} + - name: REGISTRY + value: {{ .Values.registry }} + - name: ADMISSION_MUTATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-mutating-webhook-configuration + - name: ADMISSION_VALIDATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-validating-webhook-configuration + - name: NAMESPACE_VALIDATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-ns-validating-webhook-configuration + volumeMounts: + {{- if .Values.tls.enable }} + - name: helm-tls-certs + mountPath: /root/.helm + readOnly: true + {{- if .Values.tls.verify }} + - name: helm-tls-ca + mountPath: /root/.helm/ca.crt + readOnly: true + {{- end }} + {{- end }} + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-certs + readOnly: true + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 10Mi + initContainers: + - name: webhook-init + image: {{ .Values.registry }}/{{ index .Values "operator-webhook-init" "repository" }}:{{ .Values.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: ADMISSION_MUTATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-mutating-webhook-configuration + - name: ADMISSION_VALIDATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-validating-webhook-configuration + - name: NAMESPACE_VALIDATION_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-ns-validating-webhook-configuration + - name: WEBHOOK_SERVICE + value: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + - name: WEBHOOK_NAMESPACE + value: {{ .Release.Namespace }} + - name: SECRET_NAME + value: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-certs + {{- if .Values.nodeSelector }} + nodeSelector: {{- .Values.nodeSelector | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.tls.enable }} + - name: helm-tls-certs + secret: + secretName: {{ .Values.tls.secretName }} + defaultMode: 0400 + {{- if .Values.tls.verify }} + - name: helm-tls-ca + configMap: + name: {{ template "helm-operator.fullname" . }}-helm-tls-ca-config + defaultMode: 0600 + {{- end }} + {{- end }} + - name: webhook-certs + secret: + defaultMode: 420 + secretName: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-certs diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/mutating-webhook.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/mutating-webhook.yaml new file mode 100644 index 000000000..6a17a0e1e --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/mutating-webhook.yaml @@ -0,0 +1,24 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ template "k8s-triliovault-operator.name" . 
}}-mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /mutate-triliovault-trilio-io-v1-triliovaultmanager + failurePolicy: Fail + name: v1-tvm-mutation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers + sideEffects: None diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/ns-validating-webhook.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/ns-validating-webhook.yaml new file mode 100644 index 000000000..a51d3f375 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/ns-validating-webhook.yaml @@ -0,0 +1,30 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ template "k8s-triliovault-operator.name" . }}-ns-validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /validate-core-v1-namespace + failurePolicy: Fail + name: v1-tvm-ns-validation.trilio.io + namespaceSelector: + matchExpressions: + - key: trilio-operator-label + operator: In + values: + - {{ .Release.Namespace }} + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - DELETE + resources: + - namespaces + scope: '*' + sideEffects: None diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/secret.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/secret.yaml new file mode 100644 index 000000000..ea1faf3e1 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/secret.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . 
}}-webhook-certs + namespace: {{ .Release.Namespace }} +type: Opaque diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/triliovault.trilio.io_triliovaultmanagers.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/triliovault.trilio.io_triliovaultmanagers.yaml new file mode 100644 index 000000000..c1e40f2e7 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/triliovault.trilio.io_triliovaultmanagers.yaml @@ -0,0 +1,826 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + name: triliovaultmanagers.triliovault.trilio.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.trilioVaultAppVersion + name: TrilioVault-Version + type: string + - JSONPath: .spec.applicationScope + name: Scope + type: string + - JSONPath: .status.conditions.type + name: Status + type: string + - JSONPath: .spec.restoreNamespaces + name: Restore-Namespaces + type: string + group: triliovault.trilio.io + names: + kind: TrilioVaultManager + listKind: TrilioVaultManagerList + plural: triliovaultmanagers + shortNames: + - tvm + singular: triliovaultmanager + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: TrilioVaultManager is the Schema for the triliovaultmanagers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrilioVaultManagerSpec defines the desired state of TrilioVaultManager + properties: + affinity: + description: The scheduling constraints on application pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all + objects with implicit weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no objects (i.e. is also + a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The + terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the + operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be + empty. If the operator is Gt or Lt, the values + array must have a single element, which will + be interpreted as an integer. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the affinity expressions specified by this field, + but it may choose a node that violates one or more of the + expressions. The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node that meets + all of the scheduling requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the sum + if the node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may not + try to eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some other + pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes + that satisfy the anti-affinity expressions specified by this + field, but it may choose a node that violates one or more + of the expressions. The node that is most preferred is the + one with the greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field + and adding "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the node(s) with + the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms must + be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) that + this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label query over a set of resources, in + this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any + node on which any of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + applicationScope: + description: Scope for the application which will be installed in the + cluster NamespaceScope or ClusterScope + enum: + - Cluster + - Namespaced + type: string + dataJobLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: DataJobLimits are the resource limits for all the data + processing jobs. + type: object + deploymentLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: DeploymentLimits are the resource limits for all the deployments. + type: object + helmValues: + description: HelmValues holds all the additional fields in the values.yaml + of TVK helm chart. + type: object + helmVersion: + description: 'Deprecated: Helm Version' + properties: + tillerNamespace: + type: string + version: + enum: + - v3 + type: string + required: + - version + type: object + metadataJobLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: MetadataJobLimits are the resource limits for all the meta + processing jobs. + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a map of key-value pairs. For the + pod to be eligible to run on a node, the node must have each of the + indicated key-value pairs as labels. + type: object + resources: + description: 'Deprecated: Resources are the resource requirements for + the containers.' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + restoreNamespaces: + description: 'Deprecated: RestoreNamespaces are the namespace where + you want to restore your applications. Restore Namespaces depends + on your k8s RBAC' + items: + type: string + type: array + tolerations: + description: The toleration of application against the specific taints + on the nodes + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, operator + must be Exists; this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. Exists + is equivalent to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the + toleration (which must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By default, it is not + set, which means tolerate the taint forever (do not evict). + Zero and negative values will be treated as 0 (evict immediately) + by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise + just a regular string. + type: string + type: object + type: array + trilioVaultAppVersion: + description: Helm Chart version + type: string + required: + - applicationScope + type: object + status: + description: TrilioVaultManagerStatus defines the observed state of TrilioVaultManager + properties: + conditions: + properties: + lastTransitionTime: + format: date-time + nullable: true + type: string + message: + minLength: 0 + type: string + reason: + enum: + - InstallSuccessful + - UpdateSuccessful + - UninstallSuccessful + - InstallError + - UpdateError + - ReconcileError + - UninstallError + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + enum: + - Initialized + - Deployed + - Updated + - ReleaseFailed + - Irreconcilable + type: string + type: object + deployedRelease: + properties: + manifest: + type: string + name: + type: string + type: object + releaseVersion: + type: string + required: + - conditions + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/validating-webhook.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/validating-webhook.yaml new file mode 100644 index 000000000..fe001ffe9 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/validating-webhook.yaml @@ -0,0 +1,24 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ template "k8s-triliovault-operator.name" . 
}}-validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /validate-triliovault-trilio-io-v1-triliovaultmanager + failurePolicy: Fail + name: v1-tvm-validation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers + sideEffects: None diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/webhook-service.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/webhook-service.yaml new file mode 100644 index 000000000..68f7a53c6 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/templates/webhook-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "k8s-triliovault-operator.fullname" . }} + release: "{{ .Release.Name }}" +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app: {{ template "k8s-triliovault-operator.fullname" . }} + release: "{{ .Release.Name }}" \ No newline at end of file diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/values.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/values.yaml new file mode 100644 index 000000000..c95368c61 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/2.0.500/values.yaml @@ -0,0 +1,33 @@ +## TrilioVault Operator +registry: "eu.gcr.io/amazing-chalice-243510" + +operator-webhook-init: + repository: operator-webhook-init + +k8s-triliovault-operator: + repository: k8s-triliovault-operator + +tag: "v2.0.5" + +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + +image: + pullPolicy: Always +tls: + secretName: "helm-client-certs" + verify: false + enable: false + keyFile: "tls.key" + certFile: "tls.crt" + caContent: "" + hostname: "" + +nameOverride: "" diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/.helmignore b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/.helmignore new file mode 100644 index 000000000..be86b789d --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# Helm files +OWNERS diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/Chart.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/Chart.yaml new file mode 100644 index 000000000..1fdd56a76 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/Chart.yaml @@ -0,0 +1,15 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: k8s-triliovault-operator +apiVersion: v1 +appVersion: v2.0.2 +description: K8s-TrilioVault-Operator is an operator designed to manage the K8s-TrilioVault Application Lifecycle. +home: https://github.com/trilioData/k8s-triliovault-operator +icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png +maintainers: +- email: prafull.ladha@trilio.io + name: prafull11 +name: k8s-triliovault-operator +sources: +- https://github.com/trilioData/k8s-triliovault-operator +version: v2.0.200 diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/LICENSE b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/LICENSE new file mode 100644 index 000000000..76b559d3b --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/LICENSE @@ -0,0 +1 @@ +# Placeholder for the License if we decide to provide one diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/README.md b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/README.md new file mode 100644 index 000000000..600ce8dfd --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/README.md @@ -0,0 +1,41 @@ +# K8s-TrilioVault-Operator +This operator is to manage the lifecycle of TrilioVault Backup/Recovery solution. This operator install, updates and manage the TrilioVault application. + +## Introduction + +## Prerequisites + +- Kubernetes 1.13+ +- Alpha feature gates should be enabled +- PV provisioner support +- CSI driver should be installed + +## Installation + +To install the chart with the operator name `trilio`: + +```bash +# For helm version 2 +helm install --name trilio k8s-triliovault-operator + +# For helm version 3 +helm install --name-template trilio k8s-triliovault-operator +``` + +The command deploys the K8s-triliovault-operator with the default configuration. + +## Uninstall + +To uninstall/delete the chart `trilio` : + +```bash +# For helm version 2 +helm delete trilio --purge + +# For helm version 3 +helm uninstall trilio +``` + +## Configuration + +TODO: Add possible configuration in helm chart. diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/app-readme.md b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/app-readme.md new file mode 100644 index 000000000..2836184dd --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/app-readme.md @@ -0,0 +1,4 @@ +# TrilioVault for Kubernetes + +[K8s-TrilioVault-Operator](https://trilio.io) is an operator designed to manage the K8s-TrilioVault Application +Lifecycle. 
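The operator packaged above reconciles `TrilioVaultManager` custom resources (the CRD shipped in `triliovault.trilio.io_triliovaultmanagers.yaml`) into a TrilioVault for Kubernetes installation. A minimal sketch of such a resource, assuming the v1 schema from that CRD; the metadata name, namespace, and version values below are purely illustrative, not taken from the chart:

```yaml
# Minimal TrilioVaultManager sketch against the packaged v1 CRD.
apiVersion: triliovault.trilio.io/v1
kind: TrilioVaultManager
metadata:
  name: triliovault-manager      # hypothetical name
  namespace: default             # any namespace the operator watches
spec:
  applicationScope: Cluster      # required; CRD enum allows Cluster or Namespaced
  trilioVaultAppVersion: "2.0.5" # TVK helm chart version to deploy (illustrative)
  helmValues: {}                 # optional overrides passed through to the TVK chart
```

Applying a manifest like this (for example with `kubectl apply -f`) is what triggers the operator's install/update flow described in the README; deleting it drives the uninstall path.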
diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/NOTES.txt b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/NOTES.txt new file mode 100644 index 000000000..19cd282d3 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/NOTES.txt @@ -0,0 +1,3 @@ +To verify that TrilioVault Operator has started, run: + + kubectl --namespace={{ .Release.Namespace }} get deployments -l "release={{ .Release.Name }}" \ No newline at end of file diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/_helpers.tpl b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/_helpers.tpl new file mode 100644 index 000000000..7cea76a18 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/_helpers.tpl @@ -0,0 +1,33 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-triliovault-operator.name" -}} +{{- default .Release.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "k8s-triliovault-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper TrilioVault Operator image name +*/}} +{{- define "k8s-triliovault-operator.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole.yaml new file mode 100644 index 000000000..47e151e09 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole.yaml @@ -0,0 +1,53 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{template "k8s-triliovault-operator.name" .}}-{{.Release.Namespace}}-manager-role +rules: +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaulthpas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaulthpas/status + verbs: + - get + - patch + - update +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaultmanagers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - triliovault.trilio.io + resources: + - triliovaultmanagers/status + verbs: + - get + - patch + - update +- apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole_binding.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole_binding.yaml new file mode 100644 index 000000000..6b69f7acf --- /dev/null +++ 
b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/clusterrole_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "k8s-triliovault-operator.name" . }}-{{ .Release.Namespace }}-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-triliovault-operator.name" . }}-{{ .Release.Namespace }}-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ .Release.Namespace }} diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/deployment.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/deployment.yaml new file mode 100644 index 000000000..a57ad1025 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/deployment.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: k8s-triliovault-operator + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "k8s-triliovault-operator.fullname" . }} + release: "{{ .Release.Name }}" + replicas: {{ .Values.replicaCount }} + template: + metadata: + labels: + app: {{ template "k8s-triliovault-operator.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + spec: + containers: + - name: k8s-triliovault-operator + image: {{ .Values.registry }}/{{ index .Values "k8s-triliovault-operator" "repository" }}:{{ .Values.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: TVK_ENV + value: {{ .Values.tvkEnv }} + volumeMounts: + {{- if .Values.tls.enable }} + - name: helm-tls-certs + mountPath: /root/.helm + readOnly: true + {{- if .Values.tls.verify }} + - name: helm-tls-ca + mountPath: /root/.helm/ca.crt + readOnly: true + {{- end }} + {{- end }} + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-certs + readOnly: true + resources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 10m + memory: 10Mi + initContainers: + - name: webhook-init + image: {{ .Values.registry }}/{{ index .Values "operator-webhook-init" "repository" }}:{{ .Values.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: MUTATE_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-mutating-webhook-configuration + - name: VALIDATE_CONFIG + value: {{ template "k8s-triliovault-operator.name" . }}-validating-webhook-configuration + - name: WEBHOOK_SERVICE + value: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + - name: WEBHOOK_NAMESPACE + value: {{ .Release.Namespace }} + - name: SECRET_NAME + value: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-certs + volumes: + {{- if .Values.tls.enable }} + - name: helm-tls-certs + secret: + secretName: {{ .Values.tls.secretName }} + defaultMode: 0400 + {{- if .Values.tls.verify }} + - name: helm-tls-ca + configMap: + name: {{ template "helm-operator.fullname" . }}-helm-tls-ca-config + defaultMode: 0600 + {{- end }} + {{- end }} + - name: webhook-certs + secret: + defaultMode: 420 + secretName: {{ template "k8s-triliovault-operator.fullname" . 
}}-webhook-certs diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/mutating-webhook.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/mutating-webhook.yaml new file mode 100644 index 000000000..466916239 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/mutating-webhook.yaml @@ -0,0 +1,41 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ template "k8s-triliovault-operator.name" . }}-mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /mutate-triliovault-trilio-io-v1-triliovaultmanager + failurePolicy: Fail + name: v1-tvm-mutation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /mutate-triliovault-trilio-io-v1alpha1-triliovaultmanager + failurePolicy: Fail + name: v1alpha1-tvm-mutation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/secret.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/secret.yaml new file mode 100644 index 000000000..ea1faf3e1 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/secret.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-certs + namespace: {{ .Release.Namespace }} +type: Opaque diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/triliovault.trilio.io_triliovaultmanagers.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/triliovault.trilio.io_triliovaultmanagers.yaml new file mode 100644 index 000000000..6e9ae260e --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/triliovault.trilio.io_triliovaultmanagers.yaml @@ -0,0 +1,1605 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + name: triliovaultmanagers.triliovault.trilio.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.trilioVaultAppVersion + name: TrilioVault-Version + type: string + - JSONPath: .spec.applicationScope + name: Scope + type: string + - JSONPath: .status.conditions.type + name: Status + type: string + - JSONPath: .spec.restoreNamespaces + name: Restore-Namespaces + type: string + group: triliovault.trilio.io + names: + kind: TrilioVaultManager + listKind: TrilioVaultManagerList + plural: triliovaultmanagers + singular: triliovaultmanager + scope: Namespaced + subresources: + status: {} + version: v1 + versions: + - name: v1 + schema: + openAPIV3Schema: + description: TrilioVaultManager is the Schema for the triliovaultmanagers + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrilioVaultManagerSpec defines the desired state of TrilioVaultManager + properties: + affinity: + description: The scheduling constraints on application pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. 
If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. 
If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. 
for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + applicationScope: + description: Scope for the application which will be installed in + the cluster NamespaceScope or ClusterScope + enum: + - Cluster + - Namespaced + type: string + dataJobLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: DataJobLimits are the resource limits for all the data + processing jobs. + type: object + deploymentLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: DeploymentLimits are the resource limits for all the + deployments. 
+ type: object + helmVersion: + description: Helm Version + properties: + tillerNamespace: + type: string + version: + enum: + - v2 + - v3 + type: string + required: + - version + type: object + metadataJobLimits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: MetadataJobLimits are the resource limits for all the + meta processing jobs. + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a map of key-value pairs. For + the pod to be eligible to run on a node, the node must have each + of the indicated key-value pairs as labels. + type: object + resources: + description: 'Deprecated: Resources are the resource requirements + for the containers.' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + restoreNamespaces: + description: ResourceNamespaces are the namespace where you want to + restore your applications + items: + type: string + type: array + tolerations: + description: The toleration of application against the specific taints + on the nodes + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. 
If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + trilioVaultAppVersion: + description: Helm Chart version + type: string + required: + - applicationScope + - helmVersion + type: object + status: + description: TrilioVaultManagerStatus defines the observed state of TrilioVaultManager + properties: + conditions: + properties: + lastTransitionTime: + format: date-time + nullable: true + type: string + message: + minLength: 0 + type: string + reason: + enum: + - InstallSuccessful + - UpdateSuccessful + - UninstallSuccessful + - InstallError + - UpdateError + - ReconcileError + - UninstallError + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + enum: + - Initialized + - Deployed + - ReleaseFailed + - Irreconcilable + type: string + type: object + deployedRelease: + properties: + manifest: + type: string + name: + type: string + type: object + required: + - conditions + type: object + type: object + served: true + storage: true + - name: v1alpha1 + schema: + openAPIV3Schema: + description: TrilioVaultManager is the Schema for the triliovaultmanagers + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TrilioVaultManagerSpec defines the desired state of TrilioVaultManager + properties: + affinity: + description: The scheduling constraints on application pods. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + applicationScope: + description: Scope for the application which will be installed in + the cluster NamespaceScope or ClusterScope + enum: + - Cluster + - Namespaced + type: string + helmVersion: + description: Helm Version + properties: + tillerNamespace: + type: string + version: + enum: + - v2 + - v3 + type: string + required: + - version + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector specifies a map of key-value pairs. For + the pod to be eligible to run on a node, the node must have each + of the indicated key-value pairs as labels. + type: object + resources: + description: Resources is the resource requirements for the containers. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + restoreNamespaces: + description: ResourceNamespaces are the namespace where you want to + restore your applications + items: + type: string + type: array + tolerations: + description: The toleration of application against the specific taints + on the nodes + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + trilioVaultAppVersion: + description: Helm Chart version + type: string + required: + - applicationScope + - helmVersion + type: object + status: + properties: + conditions: + properties: + lastTransitionTime: + format: date-time + type: string + message: + minLength: 0 + type: string + reason: + enum: + - InstallSuccessful + - UpdateSuccessful + - UninstallSuccessful + - InstallError + - UpdateError + - ReconcileError + - UninstallError + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + enum: + - Initialized + - Deployed + - ReleaseFailed + - Irreconcilable + type: string + type: object + deployedRelease: + properties: + manifest: + type: string + name: + type: string + type: object + required: + - conditions + type: object + type: object + served: true + storage: false +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/validating-webhook.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/validating-webhook.yaml new file mode 100644 index 000000000..41bd41dcc --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/validating-webhook.yaml @@ -0,0 +1,41 @@ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ template "k8s-triliovault-operator.name" . }}-validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + path: /validate-triliovault-trilio-io-v1-triliovaultmanager + failurePolicy: Fail + name: v1-tvm-validation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers +- clientConfig: + caBundle: Cg== + service: + name: {{ template "k8s-triliovault-operator.fullname" . 
}}-webhook-service + namespace: {{ .Release.Namespace }} + path: /validate-triliovault-trilio-io-v1alpha1-triliovaultmanager + failurePolicy: Fail + name: v1alpha1-tvm-validation.trilio.io + rules: + - apiGroups: + - triliovault.trilio.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - triliovaultmanagers diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/webhook-service.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/webhook-service.yaml new file mode 100644 index 000000000..68f7a53c6 --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/templates/webhook-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "k8s-triliovault-operator.fullname" . }}-webhook-service + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "k8s-triliovault-operator.fullname" . }} + release: "{{ .Release.Name }}" +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app: {{ template "k8s-triliovault-operator.fullname" . }} + release: "{{ .Release.Name }}" \ No newline at end of file diff --git a/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/values.yaml b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/values.yaml new file mode 100644 index 000000000..e8bfb211d --- /dev/null +++ b/charts/k8s-triliovault-operator/k8s-triliovault-operator/v2.0.200/values.yaml @@ -0,0 +1,23 @@ +## TrilioVault Operator +registry: "eu.gcr.io/amazing-chalice-243510" + +operator-webhook-init: + repository: operator-webhook-init + +k8s-triliovault-operator: + repository: k8s-triliovault-operator + +tag: "v2.0.2" + +image: + pullPolicy: Always +tls: + secretName: "helm-client-certs" + verify: false + enable: false + keyFile: "tls.key" + certFile: "tls.crt" + caContent: "" + hostname: "" + +nameOverride: "" diff --git a/charts/kubecost/cost-analyzer/1.70.000/Chart.yaml b/charts/kubecost/cost-analyzer/1.70.000/Chart.yaml new file mode 100644 index 000000000..6b33309a1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: kubecost +apiVersion: v1 +appVersion: 1.70.0 +description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor + cloud costs. +icon: https://kubecost.com/images/logo-white.png +name: cost-analyzer +version: 1.70.000 diff --git a/charts/kubecost/cost-analyzer/1.70.000/app-readme.md b/charts/kubecost/cost-analyzer/1.70.000/app-readme.md new file mode 100644 index 000000000..dfb1e5854 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/app-readme.md @@ -0,0 +1,23 @@ +# Kubecost +[Kubecost](https://kubecost.com/) is an open-source Kubernetes cost monitoring solution. + +Kubecost gives teams visibility into current and historical Kubernetes spend and resource allocation. These models provide cost transparency in Kubernetes environments that support multiple applications, teams, departments, etc. + +To see more on the functionality of the full Kubecost product, please visit the [features page](https://kubecost.com/#features) on our website. 
+ +Here is a summary of features enabled by this cost model: + +- Real-time cost allocation by Kubernetes service, deployment, namespace, label, statefulset, daemonset, pod, and container +- Dynamic asset pricing enabled by integrations with AWS, Azure, and GCP billing APIs +- Supports on-prem k8s clusters with custom pricing sheets +- Allocation for in-cluster resources like CPU, GPU, memory, and persistent volumes. +- Allocation for AWS & GCP out-of-cluster resources like RDS instances and S3 buckets with key (optional) +- Easily export pricing data to Prometheus with /metrics endpoint ([learn more](PROMETHEUS.md)) +- Free and open source distribution (Apache2 license) + +## Requirements +- Kubernetes 1.8+ +- kube-state-metrics +- Grafana +- Prometheus +- Node Exporter diff --git a/charts/kubecost/cost-analyzer/1.70.000/attached-disks.json b/charts/kubecost/cost-analyzer/1.70.000/attached-disks.json new file mode 100644 index 000000000..0ae9d4411 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/attached-disks.json @@ -0,0 +1,425 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 10, + "iteration": 1589748792557, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_fs_limit_bytes{instance=~'$disk', device!=\"tmpfs\", id=\"/\"}) by (instance)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Size", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_fs_usage_bytes{instance=~'$disk',id=\"/\"}) by (instance) / sum(container_fs_limit_bytes{instance=~'$disk',device!=\"tmpfs\", 
id=\"/\"}) by (instance)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(container_fs_inodes_free{instance=~'$disk',id=\"/\"}) by (instance) / sum(container_fs_inodes_total{instance=~'$disk',id=\"/\"}) by (instance)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "iNode Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_fs_usage_bytes{instance=~'$disk',id=\"/\"}) by (instance)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 16, + 
"style": "dark", + "tags": [ + "cost", + "utilization", + "metrics" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "disk", + "options": [], + "query": "query_result(sum(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance))", + "refresh": 1, + "regex": "/instance=\\\"(.*?)(\\\")/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-7d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Attached disk metrics", + "uid": "nBH7qBgMk", + "version": 2 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/Chart.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/Chart.yaml new file mode 100644 index 000000000..eb46b5995 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +appVersion: 6.0.0 +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +name: grafana +sources: +- https://github.com/grafana/grafana +version: 1.17.2 diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/README.md b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/README.md new file mode 100644 index 000000000..03c70b520 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/README.md @@ -0,0 +1,162 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## TL;DR; + +```console +$ helm install stable/grafana +``` + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + + +## Configuration + +| Parameter | Description | Default | +|---------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `deploymentStrategy` | Deployment strategy | `RollingUpdate` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Rediness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "fsGroup": 472}` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Image tag. 
(`Must be >= 5.0.0`) | `5.3.1` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Custom labels | `{}` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.hosts` | Ingress accepted hostnames | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[]` | +| `persistence.subPath` | Mount a sub dir of the persistent volume | `""` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment | `""` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file; this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `sidecar.dashboards.enabled` | Enables the cluster-wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.datasources.enabled` | Enables the cluster-wide search for datasources and adds/updates/deletes them in grafana | `false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials; this must have the keys `user` and `password`. | `""` | + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana pod. This container watches all config maps in the cluster and filters out the ones with a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported dashboards are deleted/updated. A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside one configmap is currently not properly mirrored in grafana.
+Example dashboard config: +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, a sidecar container is deployed in the grafana pod. This container watches all config maps in the cluster and filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in those configmaps are written to a folder and accessed by grafana on startup. Using these yaml files, the data sources in grafana can be modified. + +Example datasource config adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-datasource + labels: + grafana_datasource: "1" +data: + datasource.yaml: |- + # config file version + apiVersion: 1 + + # list of datasources that should be deleted from the database + deleteDatasources: + - name: Graphite + orgId: 1 + + # list of datasources to insert/update depending + # what's available in the database + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false + +``` diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/NOTES.txt b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/NOTES.txt new file mode 100644 index 000000000..57e84cd69 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/NOTES.txt @@ -0,0 +1,37 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "grafana.fullname" . }}.{{ .Release.Namespace }}.svc +{{ if .Values.ingress.enabled }} + From outside the cluster, the server URL(s) are: +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{ else }} + Get the Grafana URL to visit by running these commands in the same shell: +{{ if contains "NodePort" .Values.service.type -}} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" .
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{ else if contains "LoadBalancer" .Values.service.type -}} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} +{{ else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.fullname" . }},component={{ .Values.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3000 +{{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/_helpers.tpl new file mode 100644 index 000000000..3a3ebd3ec --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrole.yaml new file mode 100644 index 000000000..d49193651 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.rbac.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps"] + verbs: ["get", "watch", "list"] +{{- else }} +rules: [] +{{- end}} +{{- end}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..99dada9f4 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,25 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "grafana.fullname" . }}-clusterrole + apiGroup: rbac.authorization.k8s.io +{{- end}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap-dashboard-provider.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 000000000..1765e9aff --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,28 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . 
}}-config-dashboards +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + options: + path: {{ .Values.sidecar.dashboards.folder }} +{{- end}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap.yaml new file mode 100644 index 000000000..d283d04c0 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/configmap.yaml @@ -0,0 +1,76 @@ +{{ if .Values.global.grafana.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{ $elem }} = {{ $elemVal }} + {{- end }} +{{- end }} + +{{- if .Values.datasources }} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ toYaml $value | trim | indent 4 }} + {{- end -}} +{{- end -}} +{{- if .Values.global.prometheus.enabled }} + - access: proxy + isDefault: true + name: Prometheus + type: prometheus + url: http://{{ template "cost-analyzer.prometheus.server.name" . }}.{{ .Release.Namespace }}.svc +{{ else }} + - access: proxy + isDefault: true + name: Prometheus + type: prometheus + url: {{ .Values.global.prometheus.fqdn }} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -sk \ + --connect-timeout 60 \ + --max-time 60 \ + -H "Accept: application/json" \ + -H "Content-Type: application/json;charset=UTF-8" \ + {{- if $value.url -}}{{ $value.url }}{{- else -}} https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download{{- end -}}{{ if $value.datasource }}| sed 's|\"datasource\":[^,]*|\"datasource\": \"{{ $value.datasource }}\"|g'{{ end }} \ + > /var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 000000000..c1ab8c4ba --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,24 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.dashboards }} + {{- range $provider, $dashboards := .Values.dashboards }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + labels: + app: {{ template 
"grafana.name" $ }} + chart: {{ template "grafana.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} + dashboard-provider: {{ $provider }} +data: + {{- range $key, $value := $dashboards }} + {{- if hasKey $value "json" }} + {{ $key }}.json: | +{{ $value.json | indent 4 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/deployment.yaml new file mode 100644 index 000000000..ea8e814f3 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/deployment.yaml @@ -0,0 +1,272 @@ +{{ if .Values.global.grafana.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} + strategy: + type: {{ .Values.deploymentStrategy }} + {{- if ne .Values.deploymentStrategy "RollingUpdate" }} + rollingUpdate: null + {{- end }} + template: + metadata: + labels: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} +{{- with .Values.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "grafana.serviceAccountName" . }} +{{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +{{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 8 }} +{{- end }} +{{- if .Values.dashboards }} + initContainers: + - name: download-dashboards + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["sh", "/etc/grafana/download_dashboards.sh"] + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + subPath: {{ .Values.persistence.subPath }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . }}-sc-dashboard + image: "{{ .Values.sidecar.image }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}" + resources: +{{ toYaml .Values.sidecar.resources | indent 12 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-datasources + image: "{{ .Values.sidecar.image }}" + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + resources: +{{ toYaml .Values.sidecar.resources | indent 12 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml +{{- if .Values.dashboards }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if hasKey $value "json" }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" + {{- end }} + {{- end }} + {{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} + {{- range keys .Values.dashboardsConfigMaps }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . }}" + {{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} + - name: storage + mountPath: "/var/lib/grafana" + subPath: {{ .Values.persistence.subPath }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + ports: + - name: service + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: grafana + containerPort: 3000 + protocol: TCP + env: + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ template "grafana.fullname" . }} + key: admin-user + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "grafana.fullname" . }} + key: admin-password + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . 
}} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: user + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: password + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 12 }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} + {{- if .Values.dashboards }} + {{- range keys .Values.dashboards }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ $name }} + {{- end }} + {{- end }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + - name: storage + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} + {{- else }} + emptyDir: {} + {{- end -}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} + {{- end -}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/ingress.yaml new file mode 100644 index 000000000..556f0fab0 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/ingress.yaml @@ -0,0 +1,44 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..e2b47f42b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/pvc.yaml new file mode 100644 index 000000000..e13c78378 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/pvc.yaml @@ -0,0 +1,26 @@ +{{ if .Values.global.grafana.enabled }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + storageClassName: {{ .Values.persistence.storageClassName }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/role.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/role.yaml new file mode 100644 index 000000000..6c9195769 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/role.yaml @@ -0,0 +1,20 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- if .Values.rbac.pspEnabled }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}] +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/rolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/rolebinding.yaml new file mode 100644 index 000000000..e777ddabc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "grafana.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/secret.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/secret.yaml new file mode 100644 index 000000000..cd16c2fc7 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/secret.yaml @@ -0,0 +1,22 @@ +{{ if .Values.global.grafana.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +type: Opaque +data: + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ randAlphaNum 40 | b64enc | quote }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ .Values.ldap.config | b64enc | quote }} + {{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/service.yaml new file mode 100644 index 000000000..307d0abf1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/service.yaml @@ -0,0 +1,51 @@ +{{ if .Values.global.grafana.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + chart: {{ template "grafana.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: service + port: {{ .Values.service.port }} + protocol: TCP + targetPort: 3000 +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + selector: + app: {{ template "grafana.name" . }} + release: {{ .Release.Name }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/serviceaccount.yaml new file mode 100644 index 000000000..bbcb5055e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{ if .Values.global.grafana.enabled }} +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "grafana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "grafana.serviceAccountName" . }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/values.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/values.yaml new file mode 100644 index 000000000..504a3e86a --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/grafana/values.yaml @@ -0,0 +1,275 @@ +rbac: + create: true + pspEnabled: true +serviceAccount: + create: true + name: + +replicas: 1 + +deploymentStrategy: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +image: + repository: grafana/grafana + tag: 7.1.1 + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +securityContext: + runAsUser: 472 + fsGroup: 472 + +downloadDashboardsImage: + repository: appropriate/curl + tag: latest + pullPolicy: IfNotPresent + +## Pod Annotations +# podAnnotations: {} + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 80 + annotations: {} + labels: {} + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + enabled: false + # storageClassName: default + # accessModes: + # - ReadWriteOnce + # size: 10Gi + # annotations: {} + # subPath: "" + # existingClaim: + +adminUser: admin +adminPassword: strongpassword + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be passed on to deployment pods +env: {} + +## The name of a secret in the same kubernetes namespace which contains values to be added to the environment +## This can be useful for auth tokens, etc. +envFromSecret: "" + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + +## Pass the plugins you want installed as a list. +## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus2 +# type: prometheus +# url: http://prometheus-server.default.svc +# access: proxy +# isDefault: false + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboards to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. +## +dashboards: {} +# default: +# prometheus-stats: +# gnetId: 3662 +# revision: 2 +# datasource: Prometheus + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
+## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + auth.anonymous: + enabled: true + org_role: Admin + org_name: Main Org. + +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana in keys `user` and `password`. + existingSecret: "" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: kiwigrid/k8s-sidecar:0.1.144 + imagePullPolicy: IfNotPresent + resources: +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + dashboards: + enabled: false + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # folder in the pod that should hold the collected dashboards + folder: /tmp/dashboards + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/.helmignore b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/.helmignore new file mode 100644 index 000000000..825c00779 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +OWNERS diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/Chart.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/Chart.yaml new file mode 100644 index 000000000..dd81a9c69 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: 2.17.2 +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +version: 11.0.2 diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/README.md b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/README.md new file mode 100644 index 000000000..dc32dcd15 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/README.md @@ -0,0 +1,471 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +## TL;DR; + +```console +$ helm install stable/prometheus +``` + +## Introduction + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.3+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install --name my-release stable/prometheus +``` + +The command deploys Prometheus on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Prometheus 2.x + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/) + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +## Upgrading from previous chart versions. + +Version 9.0 adds a new option to enable or disable the Prometheus Server. +This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. +To install the server `server.enabled` must be set to `true`. + +As of version 5.0, this chart uses Prometheus 2.x. 
This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +### Example migration + +Assuming you have an existing release of the prometheus chart named `prometheus-old`, do the following in order to update to prometheus 2.x while keeping your old data: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ``` + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ``` + prometheus.yml: + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Scraping Pod Metrics via Annotations + +This chart uses a default configuration that causes prometheus +to scrape a variety of kubernetes resource types, provided they have the correct annotations. +In this section we describe how to configure pods to be scraped; +for information on how other resource types can be scraped you can +do a `helm template` to get the kubernetes resource definitions, +and then reference the prometheus configuration in the ConfigMap against the prometheus documentation +for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) +and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). + +In order to get prometheus to scrape pods, you must add annotations to the pods as below: + +``` +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "8080" +spec: +... +``` + +You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. +`prometheus.io/port` should be set to the port that your pod serves metrics from. +Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be +enclosed in double quotes. + +## Configuration + +The following table lists the configurable parameters of the Prometheus chart and their default values.
+ +Parameter | Description | Default +--------- | ----------- | ------- +`alertmanager.enabled` | If true, create alertmanager | `true` +`alertmanager.name` | alertmanager container name | `alertmanager` +`alertmanager.image.repository` | alertmanager container image repository | `prom/alertmanager` +`alertmanager.image.tag` | alertmanager container image tag | `v0.20.0` +`alertmanager.image.pullPolicy` | alertmanager container image pull policy | `IfNotPresent` +`alertmanager.prefixURL` | The prefix slug at which the server can be accessed | `` +`alertmanager.baseURL` | The external url at which the server can be accessed | `"http://localhost:9093"` +`alertmanager.extraArgs` | Additional alertmanager container arguments | `{}` +`alertmanager.extraSecretMounts` | Additional alertmanager Secret mounts | `[]` +`alertmanager.configMapOverrideName` | Prometheus alertmanager ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}` and setting this value will prevent the default alertmanager ConfigMap from being generated | `""` +`alertmanager.configFromSecret` | The name of a secret in the same kubernetes namespace which contains the Alertmanager config, setting this value will prevent the default alertmanager ConfigMap from being generated | `""` +`alertmanager.configFileName` | The configuration file name to be loaded to alertmanager. Must match the key within configuration loaded from ConfigMap/Secret. | `alertmanager.yml` +`alertmanager.ingress.enabled` | If true, alertmanager Ingress will be created | `false` +`alertmanager.ingress.annotations` | alertmanager Ingress annotations | `{}` +`alertmanager.ingress.extraLabels` | alertmanager Ingress additional labels | `{}` +`alertmanager.ingress.hosts` | alertmanager Ingress hostnames | `[]` +`alertmanager.ingress.extraPaths` | Ingress extra paths to prepend to every alertmanager host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`alertmanager.ingress.tls` | alertmanager Ingress TLS configuration (YAML) | `[]` +`alertmanager.nodeSelector` | node labels for alertmanager pod assignment | `{}` +`alertmanager.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`alertmanager.affinity` | pod affinity | `{}` +`alertmanager.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`alertmanager.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`alertmanager.schedulerName` | alertmanager alternate scheduler name | `nil` +`alertmanager.persistentVolume.enabled` | If true, alertmanager will create a Persistent Volume Claim | `true` +`alertmanager.persistentVolume.accessModes` | alertmanager data Persistent Volume access modes | `[ReadWriteOnce]` +`alertmanager.persistentVolume.annotations` | Annotations for alertmanager Persistent Volume Claim | `{}` +`alertmanager.persistentVolume.existingClaim` | alertmanager data Persistent Volume existing claim name | `""` +`alertmanager.persistentVolume.mountPath` | alertmanager data Persistent Volume mount root path | `/data` +`alertmanager.persistentVolume.size` | alertmanager data Persistent Volume size | `2Gi` +`alertmanager.persistentVolume.storageClass` | alertmanager data Persistent Volume Storage Class | `unset` +`alertmanager.persistentVolume.volumeBindingMode` | alertmanager data Persistent Volume Binding Mode | `unset` +`alertmanager.persistentVolume.subPath` | Subdirectory of alertmanager data Persistent Volume to mount | `""` +`alertmanager.podAnnotations` | annotations to be added to alertmanager pods | `{}` +`alertmanager.podLabels` | labels to be added to Prometheus AlertManager pods | `{}` +`alertmanager.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`alertmanager.replicaCount` | desired number of alertmanager pods | `1` +`alertmanager.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` +`alertmanager.statefulSet.podManagementPolicy` | podManagementPolicy of alertmanager pods | `OrderedReady` +`alertmanager.statefulSet.headless.annotations` | annotations for alertmanager headless service | `{}` +`alertmanager.statefulSet.headless.labels` | labels for alertmanager headless service | `{}` +`alertmanager.statefulSet.headless.enableMeshPeer` | If true, enable the mesh peer endpoint for the headless service | `{}` +`alertmanager.statefulSet.headless.servicePort` | alertmanager headless service port | `80` +`alertmanager.priorityClassName` | alertmanager priorityClassName | `nil` +`alertmanager.resources` | alertmanager pod resource requests & limits | `{}` +`alertmanager.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for Alert Manager containers | `{}` +`alertmanager.service.annotations` | annotations for alertmanager service | `{}` +`alertmanager.service.clusterIP` | internal alertmanager cluster service IP | `""` +`alertmanager.service.externalIPs` | alertmanager service external IP addresses | `[]` +`alertmanager.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`alertmanager.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`alertmanager.service.servicePort` | 
alertmanager service port | `80` +`alertmanager.service.sessionAffinity` | Session Affinity for alertmanager service, can be `None` or `ClientIP` | `None` +`alertmanager.service.type` | type of alertmanager service to create | `ClusterIP` +`alertmanager.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }` +`alertmanagerFiles.alertmanager.yml` | Prometheus alertmanager configuration | example configuration +`configmapReload.prometheus.enabled` | If false, the configmap-reload container for Prometheus will not be deployed | `true` +`configmapReload.prometheus.name` | configmap-reload container name | `configmap-reload` +`configmapReload.prometheus.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` +`configmapReload.prometheus.image.tag` | configmap-reload container image tag | `v0.3.0` +`configmapReload.prometheus.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` +`configmapReload.prometheus.extraArgs` | Additional configmap-reload container arguments | `{}` +`configmapReload.prometheus.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` +`configmapReload.prometheus.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]` +`configmapReload.prometheus.resources` | configmap-reload pod resource requests & limits | `{}` +`configmapReload.alertmanager.enabled` | If false, the configmap-reload container for AlertManager will not be deployed | `true` +`configmapReload.alertmanager.name` | configmap-reload container name | `configmap-reload` +`configmapReload.alertmanager.image.repository` | configmap-reload container image repository | `jimmidyson/configmap-reload` +`configmapReload.alertmanager.image.tag` | configmap-reload container image tag | `v0.3.0` +`configmapReload.alertmanager.image.pullPolicy` | configmap-reload container image pull policy | `IfNotPresent` +`configmapReload.alertmanager.extraArgs` | Additional configmap-reload container arguments | `{}` +`configmapReload.alertmanager.extraVolumeDirs` | Additional configmap-reload volume directories | `{}` +`configmapReload.alertmanager.extraConfigmapMounts` | Additional configmap-reload configMap mounts | `[]` +`configmapReload.alertmanager.resources` | configmap-reload pod resource requests & limits | `{}` +`initChownData.enabled` | If false, don't reset data ownership at startup | true +`initChownData.name` | init-chown-data container name | `init-chown-data` +`initChownData.image.repository` | init-chown-data container image repository | `busybox` +`initChownData.image.tag` | init-chown-data container image tag | `latest` +`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` +`initChownData.resources` | init-chown-data pod resource requests & limits | `{}` +`kubeStateMetrics.enabled` | If true, create kube-state-metrics sub-chart, see the [kube-state-metrics chart for configuration options](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) | `true` +`nodeExporter.enabled` | If true, create node-exporter | `true` +`nodeExporter.name` | node-exporter container name | `node-exporter` +`nodeExporter.image.repository` | node-exporter container image repository| `prom/node-exporter` +`nodeExporter.image.tag` | node-exporter container image tag | `v0.18.1` +`nodeExporter.image.pullPolicy` | node-exporter container image pull policy | `IfNotPresent` +`nodeExporter.extraArgs` | Additional node-exporter container arguments | `{}` 
+`nodeExporter.extraHostPathMounts` | Additional node-exporter hostPath mounts | `[]` +`nodeExporter.extraConfigmapMounts` | Additional node-exporter configMap mounts | `[]` +`nodeExporter.hostNetwork` | If true, node-exporter pods share the host network namespace | `true` +`nodeExporter.hostPID` | If true, node-exporter pods share the host PID namespace | `true` +`nodeExporter.nodeSelector` | node labels for node-exporter pod assignment | `{}` +`nodeExporter.podAnnotations` | annotations to be added to node-exporter pods | `{}` +`nodeExporter.pod.labels` | labels to be added to node-exporter pods | `{}` +`nodeExporter.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`nodeExporter.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`nodeExporter.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`nodeExporter.podSecurityPolicy.enabled` | Specify if a Pod Security Policy for node-exporter must be created | `false` +`nodeExporter.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`nodeExporter.priorityClassName` | node-exporter priorityClassName | `nil` +`nodeExporter.resources` | node-exporter resource requests and limits (YAML) | `{}` +`nodeExporter.securityContext` | securityContext for containers in pod | `{}` +`nodeExporter.service.annotations` | annotations for node-exporter service | `{prometheus.io/scrape: "true"}` +`nodeExporter.service.clusterIP` | internal node-exporter cluster service IP | `None` +`nodeExporter.service.externalIPs` | node-exporter service external IP addresses | `[]` +`nodeExporter.service.hostPort` | node-exporter service host port | `9100` +`nodeExporter.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`nodeExporter.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`nodeExporter.service.servicePort` | node-exporter service port | `9100` +`nodeExporter.service.type` | type of node-exporter service to create | `ClusterIP` +`podSecurityPolicy.enabled` | If true, create & use pod security policies resources | `false` +`pushgateway.enabled` | If true, create pushgateway | `true` +`pushgateway.name` | pushgateway container name | `pushgateway` +`pushgateway.image.repository` | pushgateway container image repository | `prom/pushgateway` +`pushgateway.image.tag` | pushgateway container image tag | `v1.0.1` +`pushgateway.image.pullPolicy` | pushgateway container image pull policy | `IfNotPresent` +`pushgateway.extraArgs` | Additional pushgateway container arguments | `{}` +`pushgateway.ingress.enabled` | If true, pushgateway Ingress will be created | `false` +`pushgateway.ingress.annotations` | pushgateway Ingress annotations | `{}` +`pushgateway.ingress.hosts` | pushgateway Ingress hostnames | `[]` +`pushgateway.ingress.extraPaths` | Ingress extra paths to prepend to every pushgateway host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`pushgateway.ingress.tls` | pushgateway Ingress TLS configuration (YAML) | `[]` +`pushgateway.nodeSelector` | node labels for pushgateway pod assignment | `{}` +`pushgateway.podAnnotations` | annotations to be added to pushgateway pods | `{}` +`pushgateway.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`pushgateway.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`pushgateway.replicaCount` | desired number of pushgateway pods | `1` +`pushgateway.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`pushgateway.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`pushgateway.schedulerName` | pushgateway alternate scheduler name | `nil` +`pushgateway.persistentVolume.enabled` | If true, Prometheus pushgateway will create a Persistent Volume Claim | `false` +`pushgateway.persistentVolume.accessModes` | Prometheus pushgateway data Persistent Volume access modes | `[ReadWriteOnce]` +`pushgateway.persistentVolume.annotations` | Prometheus pushgateway data Persistent Volume annotations | `{}` +`pushgateway.persistentVolume.existingClaim` | Prometheus pushgateway data Persistent Volume existing claim name | `""` +`pushgateway.persistentVolume.mountPath` | Prometheus pushgateway data Persistent Volume mount root path | `/data` +`pushgateway.persistentVolume.size` | Prometheus pushgateway data Persistent Volume size | `2Gi` +`pushgateway.persistentVolume.storageClass` | Prometheus pushgateway data Persistent Volume Storage Class | `unset` +`pushgateway.persistentVolume.volumeBindingMode` | Prometheus pushgateway data Persistent Volume Binding Mode | `unset` +`pushgateway.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` +`pushgateway.priorityClassName` | pushgateway priorityClassName | `nil` +`pushgateway.resources` | pushgateway pod resource requests & limits | `{}` +`pushgateway.service.annotations` | annotations for pushgateway service | `{}` +`pushgateway.service.clusterIP` | internal pushgateway cluster service IP | `""` +`pushgateway.service.externalIPs` | pushgateway service external IP addresses | `[]` +`pushgateway.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`pushgateway.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`pushgateway.service.servicePort` | pushgateway service port | `9091` +`pushgateway.service.type` | type of pushgateway service to create | `ClusterIP` +`pushgateway.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }` +`rbac.create` | If true, create & use RBAC resources | `true` +`server.enabled` | If false, Prometheus server will not be created | `true` +`server.name` | Prometheus server container name | `server` +`server.image.repository` | Prometheus server container image repository | `prom/prometheus` +`server.image.tag` | Prometheus server container image tag | `v2.16.0` +`server.image.pullPolicy` | Prometheus server container image pull policy | `IfNotPresent` +`server.configPath` | Path to a prometheus server config file on the container FS | `/etc/config/prometheus.yml` +`server.global.scrape_interval` | How frequently to scrape targets by default | `1m` +`server.global.scrape_timeout` | How long until 
a scrape request times out | `10s` +`server.global.evaluation_interval` | How frequently to evaluate rules | `1m` +`server.remoteWrite` | The remote write feature of Prometheus allows transparently sending samples. | `{}` +`server.remoteRead` | The remote read feature of Prometheus allows transparently receiving samples. | `{}` +`server.extraArgs` | Additional Prometheus server container arguments | `{}` +`server.extraFlags` | Additional Prometheus server container flags | `["web.enable-lifecycle"]` +`server.extraInitContainers` | Init containers to launch alongside the server | `[]` +`server.prefixURL` | The prefix slug at which the server can be accessed | `` +`server.baseURL` | The external url at which the server can be accessed | `` +`server.env` | Prometheus server environment variables | `[]` +`server.extraHostPathMounts` | Additional Prometheus server hostPath mounts | `[]` +`server.extraConfigmapMounts` | Additional Prometheus server configMap mounts | `[]` +`server.extraSecretMounts` | Additional Prometheus server Secret mounts | `[]` +`server.extraVolumeMounts` | Additional Prometheus server Volume mounts | `[]` +`server.extraVolumes` | Additional Prometheus server Volumes | `[]` +`server.configMapOverrideName` | Prometheus server ConfigMap override where full-name is `{{.Release.Name}}-{{.Values.server.configMapOverrideName}}` and setting this value will prevent the default server ConfigMap from being generated | `""` +`server.ingress.enabled` | If true, Prometheus server Ingress will be created | `false` +`server.ingress.annotations` | Prometheus server Ingress annotations | `[]` +`server.ingress.extraLabels` | Prometheus server Ingress additional labels | `{}` +`server.ingress.hosts` | Prometheus server Ingress hostnames | `[]` +`server.ingress.extraPaths` | Ingress extra paths to prepend to every Prometheus server host configuration.
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions) | `[]` +`server.ingress.tls` | Prometheus server Ingress TLS configuration (YAML) | `[]` +`server.nodeSelector` | node labels for Prometheus server pod assignment | `{}` +`server.tolerations` | node taints to tolerate (requires Kubernetes >=1.6) | `[]` +`server.affinity` | pod affinity | `{}` +`server.podDisruptionBudget.enabled` | If true, create a PodDisruptionBudget | `false` +`server.podDisruptionBudget.maxUnavailable` | Maximum unavailable instances in PDB | `1` +`server.priorityClassName` | Prometheus server priorityClassName | `nil` +`server.schedulerName` | Prometheus server alternate scheduler name | `nil` +`server.persistentVolume.enabled` | If true, Prometheus server will create a Persistent Volume Claim | `true` +`server.persistentVolume.accessModes` | Prometheus server data Persistent Volume access modes | `[ReadWriteOnce]` +`server.persistentVolume.annotations` | Prometheus server data Persistent Volume annotations | `{}` +`server.persistentVolume.existingClaim` | Prometheus server data Persistent Volume existing claim name | `""` +`server.persistentVolume.mountPath` | Prometheus server data Persistent Volume mount root path | `/data` +`server.persistentVolume.size` | Prometheus server data Persistent Volume size | `8Gi` +`server.persistentVolume.storageClass` | Prometheus server data Persistent Volume Storage Class | `unset` +`server.persistentVolume.volumeBindingMode` | Prometheus server data Persistent Volume Binding Mode | `unset` +`server.persistentVolume.subPath` | Subdirectory of Prometheus server data Persistent Volume to mount | `""` +`server.emptyDir.sizeLimit` | emptyDir sizeLimit if a Persistent Volume is not used | `""` +`server.podAnnotations` | annotations to be added to Prometheus server pods | `{}` +`server.podLabels` | labels to be added to Prometheus server pods | `{}` +`server.alertmanagers` | Prometheus AlertManager configuration for the Prometheus server | `{}` +`server.deploymentAnnotations` | annotations to be added to Prometheus server deployment | `{}` +`server.podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +`server.replicaCount` | desired number of Prometheus server pods | `1` +`server.statefulSet.enabled` | If true, use a statefulset instead of a deployment for pod management | `false` +`server.statefulSet.annotations` | annotations to be added to Prometheus server stateful set | `{}` +`server.statefulSet.labels` | labels to be added to Prometheus server stateful set | `{}` +`server.statefulSet.podManagementPolicy` | podManagementPolicy of server pods | `OrderedReady` +`server.statefulSet.headless.annotations` | annotations for Prometheus server headless service | `{}` +`server.statefulSet.headless.labels` | labels for Prometheus server headless service | `{}` +`server.statefulSet.headless.servicePort` | Prometheus server headless service port | `80` +`server.resources` | Prometheus server resource requests and limits | `{}` +`server.verticalAutoscaler.enabled` | If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) | `false` +`server.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for server containers | `{}` +`server.service.annotations` | annotations for Prometheus server service | `{}`
+`server.service.clusterIP` | internal Prometheus server cluster service IP | `""` +`server.service.externalIPs` | Prometheus server service external IP addresses | `[]` +`server.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` +`server.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` +`server.service.nodePort` | Port to be used as the service NodePort (ignored if `server.service.type` is not `NodePort`) | `0` +`server.service.servicePort` | Prometheus server service port | `80` +`server.service.sessionAffinity` | Session Affinity for server service, can be `None` or `ClientIP` | `None` +`server.service.type` | type of Prometheus server service to create | `ClusterIP` +`server.service.gRPC.enabled` | If true, open a second port on the service for gRPC | `false` +`server.service.gRPC.servicePort` | Prometheus service gRPC port (ignored if `server.service.gRPC.enabled` is not `true`) | `10901` +`server.service.gRPC.nodePort` | Port to be used as gRPC nodePort in the prometheus service | `0` +`server.service.statefulsetReplica.enabled` | If true, send the traffic from the service to only one replica of the replicaset | `false` +`server.service.statefulsetReplica.replica` | Which replica to send the traffic to | `0` +`server.sidecarContainers` | array of snippets with your sidecar containers for prometheus server | `""` +`server.strategy` | Deployment strategy | `{ "type": "RollingUpdate" }` +`serviceAccounts.alertmanager.create` | If true, create the alertmanager service account | `true` +`serviceAccounts.alertmanager.name` | name of the alertmanager service account to use or create | `{{ prometheus.alertmanager.fullname }}` +`serviceAccounts.kubeStateMetrics.create` | If true, create the kubeStateMetrics service account | `true` +`serviceAccounts.kubeStateMetrics.name` | name of the kubeStateMetrics service account to use or create | `{{ prometheus.kubeStateMetrics.fullname }}` +`serviceAccounts.nodeExporter.create` | If true, create the nodeExporter service account | `true` +`serviceAccounts.nodeExporter.name` | name of the nodeExporter service account to use or create | `{{ prometheus.nodeExporter.fullname }}` +`serviceAccounts.pushgateway.create` | If true, create the pushgateway service account | `true` +`serviceAccounts.pushgateway.name` | name of the pushgateway service account to use or create | `{{ prometheus.pushgateway.fullname }}` +`serviceAccounts.server.create` | If true, create the server service account | `true` +`serviceAccounts.server.name` | name of the server service account to use or create | `{{ prometheus.server.fullname }}` +`server.terminationGracePeriodSeconds` | Prometheus server Pod termination grace period | `300` +`server.retention` | (optional) Prometheus data retention | `"15d"` +`serverFiles.alerts` | (Deprecated) Prometheus server alerts configuration | `{}` +`serverFiles.rules` | (Deprecated) Prometheus server rules configuration | `{}` +`serverFiles.alerting_rules.yml` | Prometheus server alerts configuration | `{}` +`serverFiles.recording_rules.yml` | Prometheus server rules configuration | `{}` +`serverFiles.prometheus.yml` | Prometheus server scrape configuration | example configuration +`extraScrapeConfigs` | Prometheus server additional scrape configuration | "" +`alertRelabelConfigs` | Prometheus server [alert relabeling configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs) for H/A prometheus | ""
+`networkPolicy.enabled` | Enable NetworkPolicy | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install stable/prometheus --name my-release \ + --set server.terminationGracePeriodSeconds=360 +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install stable/prometheus --name my-release -f values.yaml +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +Note that you can provide multiple YAML files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, + +```yaml +# values.yaml +# ... + +# service1-alert.yaml +serverFiles: + alerts: + service1: + - alert: anAlert + # ... + +# service2-alert.yaml +serverFiles: + alerts: + service2: + - alert: anAlert + # ... +``` + +```console +$ helm install stable/prometheus --name my-release -f values.yaml -f service1-alert.yaml -f service2-alert.yaml +``` + +### RBAC Configuration +Role and RoleBinding resources will be created automatically for the `server` and `kubeStateMetrics` services. + +To manually set up RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. + +> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. + +### ConfigMap Files +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager +and Kube State Metrics by only accepting connections from Prometheus Server. +All inbound connections to Prometheus Server are still allowed.
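+As noted below, when your scrape targets are themselves restricted by their own NetworkPolicy, they may need an additional policy that lets the scrapes through. The following is only a sketch of such a policy: the `my-app` label and port `8080` are placeholders for your own workload, and the `app: prometheus` / `component: server` selector is an assumption about the labels carried by the Prometheus server pods in your release, so verify both against your cluster before applying it.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-prometheus-scrape
+spec:
+  # Placeholder selector: the pods serving /metrics that Prometheus should reach.
+  podSelector:
+    matchLabels:
+      app: my-app
+  policyTypes:
+    - Ingress
+  ingress:
+    - from:
+        - podSelector:
+            # Assumed labels of the Prometheus server pods; adjust to your release.
+            matchLabels:
+              app: prometheus
+              component: server
+      ports:
+        - protocol: TCP
+          port: 8080   # Placeholder: the port your pod exposes metrics on.
+```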
+ +To enable network policy for Prometheus, install a networking plugin that +implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need +to manually create a networkpolicy which allows it. diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/.helmignore b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/Chart.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/Chart.yaml new file mode 100644 index 000000000..7752ccb44 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: 1.9.5 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: jose@armesto.net + name: fiunchinho +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 +- email: manuel@rueg.eu + name: mrueg +name: kube-state-metrics +sources: +- https://github.com/kubernetes/kube-state-metrics/ +version: 2.7.2 diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/OWNERS b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/OWNERS new file mode 100644 index 000000000..6ffd97d74 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/OWNERS @@ -0,0 +1,8 @@ +approvers: +- fiunchinho +- tariq1890 +- mrueg +reviewers: +- fiunchinho +- tariq1890 +- mrueg diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/README.md b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/README.md new file mode 100644 index 000000000..5c6456983 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/README.md @@ -0,0 +1,73 @@ +# kube-state-metrics Helm Chart + +* Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). 
+ +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install stable/kube-state-metrics +``` + +## Configuration + +| Parameter | Description | Default | +|:---------------------------------------------|:--------------------------------------------------------------------------------------|:-------------------------------------------| +| `image.repository` | The image repository to pull from | quay.io/coreos/kube-state-metrics | +| `image.tag` | The image tag to pull from | `v1.9.5` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `replicas` | Number of replicas | `1` | +| `autosharding.enabled` | Set to `true` to automatically shard data across `replicas` pods. EXPERIMENTAL | `false` | +| `service.port` | The port of the container | `8080` | +| `service.annotations` | Annotations to be added to the service | `{}` | +| `customLabels` | Custom labels to apply to service, deployment and pods | `{}` | +| `hostNetwork` | Whether or not to use the host network | `false` | +| `prometheusScrape` | Whether or not enable prom scrape | `true` | +| `rbac.create` | If true, create & use RBAC resources | `true` | +| `serviceAccount.create` | If true, create & use serviceAccount | `true` | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `serviceAccount.imagePullSecrets` | Specify image pull secrets field | `[]` | +| `podSecurityPolicy.enabled` | If true, create & use PodSecurityPolicy resources | `false` | +| `podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | {} | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `65534` | +| `securityContext.runAsUser` | User ID for the container | `65534` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity settings for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `podAnnotations` | Annotations to be added to the pod | {} | +| `resources` | kube-state-metrics resource requests and limits | {} | +| `collectors.certificatesigningrequests` | Enable the certificatesigningrequests collector. | `true` | +| `collectors.configmaps` | Enable the configmaps collector. | `true` | +| `collectors.cronjobs` | Enable the cronjobs collector. | `true` | +| `collectors.daemonsets` | Enable the daemonsets collector. | `true` | +| `collectors.deployments` | Enable the deployments collector. | `true` | +| `collectors.endpoints` | Enable the endpoints collector. | `true` | +| `collectors.horizontalpodautoscalers` | Enable the horizontalpodautoscalers collector. | `true` | +| `collectors.ingresses` | Enable the ingresses collector. | `true` | +| `collectors.jobs` | Enable the jobs collector. | `true` | +| `collectors.limitranges` | Enable the limitranges collector. | `true` | +| `collectors.mutatingwebhookconfigurations` | Enable the mutatingwebhookconfigurations collector. | `false` | +| `collectors.namespaces` | Enable the namespaces collector. | `true` | +| `collectors.nodes` | Enable the nodes collector. | `true` | +| `collectors.persistentvolumeclaims` | Enable the persistentvolumeclaims collector. | `true` | +| `collectors.persistentvolumes` | Enable the persistentvolumes collector. | `true` | +| `collectors.poddisruptionbudgets` | Enable the poddisruptionbudgets collector. 
| `true` | +| `collectors.pods` | Enable the pods collector. | `true` | +| `collectors.replicasets` | Enable the replicasets collector. | `true` | +| `collectors.replicationcontrollers` | Enable the replicationcontrollers collector. | `true` | +| `collectors.resourcequotas` | Enable the resourcequotas collector. | `true` | +| `collectors.secrets` | Enable the secrets collector. | `true` | +| `collectors.services` | Enable the services collector. | `true` | +| `collectors.statefulsets` | Enable the statefulsets collector. | `true` | +| `collectors.storageclasses` | Enable the storageclasses collector. | `true` | +| `collectors.validatingwebhookconfigurations` | Enable the validatingwebhookconfigurations collector. | `false` | +| `collectors.verticalpodautoscalers` | Enable the verticalpodautoscalers collector. | `false` | +| `collectors.volumeattachments` | Enable the volumeattachments collector. | `false` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `prometheus.monitor.namespace` | Namespace where servicemonitor resource should be created | `the same namespace as kube-state-metrics` | +| `prometheus.monitor.honorLabels` | Honor metric labels | `false` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt new file mode 100644 index 000000000..5a646e0cc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/NOTES.txt @@ -0,0 +1,10 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. +They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl new file mode 100644 index 000000000..6ae0e647f --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml new file mode 100644 index 000000000..319aec16c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrole.yaml @@ -0,0 +1,180 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . 
}} +rules: +{{ if .Values.collectors.certificatesigningrequests }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.configmaps }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.cronjobs }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.daemonsets }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.deployments }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.endpoints }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.horizontalpodautoscalers }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.ingresses }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.jobs }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.limitranges }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.mutatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.namespaces }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.networkpolicies }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.nodes }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.persistentvolumeclaims }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.persistentvolumes }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.poddisruptionbudgets }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.pods }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.replicasets }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.replicationcontrollers }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.resourcequotas }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.secrets }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.services }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.statefulsets }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.storageclasses }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if 
.Values.collectors.validatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.volumeattachments }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if .Values.collectors.verticalpodautoscalers }} +- apiGroups: ["autoscaling.k8s.io"] + resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..4635985aa --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-state-metrics.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 000000000..b6affcc20 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,186 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + replicas: {{ .Values.replicas }} +{{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] +{{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . 
}} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} +{{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- end }} + args: +{{ if .Values.collectors.certificatesigningrequests }} + - --collectors=certificatesigningrequests +{{ end }} +{{ if .Values.collectors.configmaps }} + - --collectors=configmaps +{{ end }} +{{ if .Values.collectors.cronjobs }} + - --collectors=cronjobs +{{ end }} +{{ if .Values.collectors.daemonsets }} + - --collectors=daemonsets +{{ end }} +{{ if .Values.collectors.deployments }} + - --collectors=deployments +{{ end }} +{{ if .Values.collectors.endpoints }} + - --collectors=endpoints +{{ end }} +{{ if .Values.collectors.horizontalpodautoscalers }} + - --collectors=horizontalpodautoscalers +{{ end }} +{{ if .Values.collectors.ingresses }} + - --collectors=ingresses +{{ end }} +{{ if .Values.collectors.jobs }} + - --collectors=jobs +{{ end }} +{{ if .Values.collectors.limitranges }} + - --collectors=limitranges +{{ end }} +{{ if .Values.collectors.mutatingwebhookconfigurations }} + - --collectors=mutatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.namespaces }} + - --collectors=namespaces +{{ end }} +{{ if .Values.collectors.networkpolicies }} + - --collectors=networkpolicies +{{ end }} +{{ if .Values.collectors.nodes }} + - --collectors=nodes +{{ end }} +{{ if .Values.collectors.persistentvolumeclaims }} + - --collectors=persistentvolumeclaims +{{ end }} +{{ if .Values.collectors.persistentvolumes }} + - --collectors=persistentvolumes +{{ end }} +{{ if .Values.collectors.poddisruptionbudgets }} + - --collectors=poddisruptionbudgets +{{ end }} +{{ if .Values.collectors.pods }} + - --collectors=pods +{{ end }} +{{ if .Values.collectors.replicasets }} + - --collectors=replicasets +{{ end }} +{{ if .Values.collectors.replicationcontrollers }} + - --collectors=replicationcontrollers +{{ end }} +{{ if .Values.collectors.resourcequotas }} + - --collectors=resourcequotas +{{ end }} +{{ if .Values.collectors.secrets }} + - --collectors=secrets +{{ end }} +{{ if .Values.collectors.services }} + - --collectors=services +{{ end }} +{{ if .Values.collectors.statefulsets }} + - --collectors=statefulsets +{{ end }} +{{ if .Values.collectors.storageclasses }} + - --collectors=storageclasses +{{ end }} +{{ if .Values.collectors.validatingwebhookconfigurations }} + - --collectors=validatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.verticalpodautoscalers }} + - --collectors=verticalpodautoscalers +{{ end }} +{{ if .Values.collectors.volumeattachments }} + - --collectors=volumeattachments +{{ end }} +{{ if .Values.namespace }} + - --namespace={{ .Values.namespace }} +{{ end }} +{{ if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) +{{ end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: 8080 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 +{{- if 
.Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..aeff11791 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100644 index 000000000..dcd65e13e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100644 index 000000000..a206e640d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 000000000..5dacf5217 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: 8080 +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} + selector: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 000000000..32bb1640f --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 000000000..54cde362d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,25 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 000000000..bf5396072 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - kube-state-metrics + resources: + - statefulsets + verbs: + - get +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 000000000..6a2e5bfe7 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/values.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/values.yaml new file mode 100644 index 000000000..888ca544b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/charts/kube-state-metrics/values.yaml @@ -0,0 +1,126 @@ +# Default values for kube-state-metrics. +prometheusScrape: true +image: + repository: quay.io/coreos/kube-state-metrics + tag: v1.9.5 + pullPolicy: IfNotPresent + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + annotations: {} + +customLabels: {} + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + honorLabels: false + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + +securityContext: + enabled: true + runAsUser: 65534 + fsGroup: 65534 + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Available collectors for kube-state-metrics. 
By default all available +# collectors are enabled. +collectors: + certificatesigningrequests: true + configmaps: true + cronjobs: true + daemonsets: true + deployments: true + endpoints: true + horizontalpodautoscalers: true + ingresses: true + jobs: true + limitranges: true + mutatingwebhookconfigurations: false + namespaces: true + networkpolicies: false + nodes: true + persistentvolumeclaims: true + persistentvolumes: true + poddisruptionbudgets: true + pods: true + replicasets: true + replicationcontrollers: true + resourcequotas: true + secrets: false + services: true + statefulsets: true + storageclasses: true + validatingwebhookconfigurations: false + verticalpodautoscalers: false + volumeattachments: false + +# Namespace to be enabled for collecting resources. By default all namespaces are collected. +# namespace: "" + +## Override the deployment namespace +## +namespaceOverride: "" diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.lock b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.lock new file mode 100644 index 000000000..4a4bde218 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kube-state-metrics + repository: https://kubernetes-charts.storage.googleapis.com/ + version: 2.7.2 +digest: sha256:695d0dbc2db8bccf5672145697546891da60ff12fbdb4f1bfc02459f4b755e4c +generated: 2020-03-18T18:57:59.00056179Z diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.yaml new file mode 100644 index 000000000..6e079ae7d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/requirements.yaml @@ -0,0 +1,7 @@ +dependencies: + + - name: kube-state-metrics + version: "2.7.*" + repository: https://kubernetes-charts.storage.googleapis.com/ + condition: kubeStateMetrics.enabled + diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/NOTES.txt b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/NOTES.txt new file mode 100644 index 000000000..0e8868f0b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,112 @@ +{{- if .Values.server.enabled -}} +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . 
}}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} +{{- else }} +################################################################################# +###### WARNING: Pod Security Policy has been moved to a global property. 
##### +###### use .Values.podSecurityPolicy.enabled with pod-based ##### +###### annotations ##### +###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### +################################################################################# +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/_helpers.tpl new file mode 100644 index 000000000..295aa01c5 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,276 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.matchLabels" -}} +app: {{ template "prometheus.name" . }} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . 
}} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.kubeStateMetrics.labels" -}} +{{ include "prometheus.kubeStateMetrics.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.kubeStateMetrics.matchLabels" -}} +component: {{ .Values.kubeStateMetrics.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified kube-state-metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.kubeStateMetrics.fullname" -}} +{{- if .Values.kubeStateMetrics.fullnameOverride -}} +{{- .Values.kubeStateMetrics.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.kubeStateMetrics.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.3-0, <1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the kubeStateMetrics component +*/}} +{{- define "prometheus.serviceAccountName.kubeStateMetrics" -}} +{{- if .Values.serviceAccounts.kubeStateMetrics.create -}} + {{ default (include "prometheus.kubeStateMetrics.fullname" .) .Values.serviceAccounts.kubeStateMetrics.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.kubeStateMetrics.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrole.yaml new file mode 100644 index 000000000..9bec69257 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrole.yaml @@ -0,0 +1,23 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml new file mode 100644 index 000000000..a058c7121 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-configmap.yaml new file mode 100644 index 000000000..09708915c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-configmap.yaml @@ -0,0 +1,20 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{- if $key | regexMatch ".*\\.ya?ml$" }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} + {{- else }} + {{ $key }}: {{ toYaml $value | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-deployment.yaml new file mode 100644 index 000000000..892204ab2 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-deployment.yaml @@ -0,0 +1,141 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.alertmanager.strategy }} + strategy: +{{ toYaml .Values.alertmanager.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: +{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=$(POD_IP):6783 + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/-/ready + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end -}} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-ingress.yaml new file mode 100644 index 000000000..dc6ba4418 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-ingress.yaml @@ -0,0 +1,44 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . 
}} +spec: + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-networkpolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-networkpolicy.yaml new file mode 100644 index 000000000..62633d0bc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-networkpolicy.yaml @@ -0,0 +1,21 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9093 +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pdb.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pdb.yaml new file mode 100644 index 000000000..bb190f23d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pdb.yaml @@ -0,0 +1,15 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . | nindent 6 }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml new file mode 100644 index 000000000..da2fbbd1c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-podsecuritypolicy.yaml @@ -0,0 +1,50 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pvc.yaml new file mode 100644 index 000000000..58de9fd39 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-pvc.yaml @@ -0,0 +1,34 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} +{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service-headless.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service-headless.yaml new file mode 100644 index 000000000..1519344ba --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service-headless.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service.yaml new file mode 100644 index 000000000..9bc45f7c6 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-service.yaml @@ -0,0 +1,54 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-serviceaccount.yaml new file mode 100644 index 000000000..d99c29996 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . 
}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-statefulset.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-statefulset.yaml new file mode 100644 index 000000000..25a9b7a9c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/alertmanager-statefulset.yaml @@ -0,0 +1,154 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: +{{ toYaml .Values.alertmanager.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 8 }} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=$(POD_IP):6783 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: {} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-daemonset.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-daemonset.yaml new file mode 100644 index 000000000..fce0f2714 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-daemonset.yaml @@ -0,0 +1,125 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . }} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.nodeExporter.hostNetwork }} + - --web.listen-address=:{{ .Values.nodeExporter.service.hostPort }} + {{- end }} + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + containerPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + containerPort: 9100 + {{- end }} + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml new file mode 100644 index 000000000..243667dd1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-podsecuritypolicy.yaml @@ -0,0 +1,57 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-role.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-role.yaml new file mode 100644 index 000000000..1926db04e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-role.yaml @@ -0,0 +1,19 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-rolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-rolebinding.yaml new file mode 100644 index 000000000..fb39ab64f --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-rolebinding.yaml @@ -0,0 +1,21 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-service.yaml new file mode 100644 index 000000000..40cbd8d69 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-service.yaml @@ -0,0 +1,46 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + {{- if .Values.nodeExporter.hostNetwork }} + targetPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + targetPort: 9100 + {{- end }} + selector: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-serviceaccount.yaml new file mode 100644 index 000000000..b75c4a4b6 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/node-exporter-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrole.yaml new file mode 100644 index 000000000..9ea81e4a2 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrole.yaml @@ -0,0 +1,23 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . 
}} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml new file mode 100644 index 000000000..475d9d63d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-deployment.yaml new file mode 100644 index 000000000..9dec641fc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-deployment.yaml @@ -0,0 +1,99 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: +{{ toYaml .Values.pushgateway.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-ingress.yaml new file mode 100644 index 000000000..422129b6f --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-ingress.yaml @@ -0,0 +1,41 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-networkpolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-networkpolicy.yaml new file mode 100644 index 000000000..70a5ada3b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-networkpolicy.yaml @@ -0,0 +1,21 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pdb.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pdb.yaml new file mode 100644 index 000000000..edf2318fe --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pdb.yaml @@ -0,0 +1,15 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . | nindent 6 }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml new file mode 100644 index 000000000..80617cbc7 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-podsecuritypolicy.yaml @@ -0,0 +1,46 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pvc.yaml new file mode 100644 index 000000000..5b4eeb937 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-pvc.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} +{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-service.yaml new file mode 100644 index 000000000..ffcc4a20b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-service.yaml @@ -0,0 +1,42 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . 
}} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-serviceaccount.yaml new file mode 100644 index 000000000..3b221e43d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/pushgateway-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrole.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrole.yaml new file mode 100644 index 000000000..410685193 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrole.yaml @@ -0,0 +1,49 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.server.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrolebinding.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrolebinding.yaml new file mode 100644 index 000000000..a89f3bd76 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if and .Values.server.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.fullname" . }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-configmap.yaml new file mode 100644 index 000000000..52a5d5e53 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-configmap.yaml @@ -0,0 +1,83 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +data: +{{- $root := . -}} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: + action: drop +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-deployment.yaml 
b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-deployment.yaml new file mode 100644 index 000000000..4a1c14f08 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-deployment.yaml @@ -0,0 +1,216 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: +{{ toYaml .Values.server.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: +{{ toYaml .Values.server.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- toYaml .Values.server.sidecarContainers | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- 
else }}{{ template "prometheus.server.fullname" . }}{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-ingress.yaml new file mode 100644 index 000000000..4cdca92a3 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-ingress.yaml @@ -0,0 +1,46 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +spec: + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-networkpolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-networkpolicy.yaml new file mode 100644 index 000000000..152f3a967 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-networkpolicy.yaml @@ -0,0 +1,19 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pdb.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pdb.yaml new file mode 100644 index 000000000..ec90cd5c9 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pdb.yaml @@ -0,0 +1,15 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . | nindent 6 }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-podsecuritypolicy.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-podsecuritypolicy.yaml new file mode 100644 index 000000000..73bf065dc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-podsecuritypolicy.yaml @@ -0,0 +1,55 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pvc.yaml new file mode 100644 index 000000000..22cb51afc --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-pvc.yaml @@ -0,0 +1,36 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service-headless.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service-headless.yaml new file mode 100644 index 000000000..018a75b79 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service-headless.yaml @@ -0,0 +1,28 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . 
}}-headless +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service.yaml new file mode 100644 index 000000000..e03faf974 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-service.yaml @@ -0,0 +1,61 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ .Release.Name }}-{{ .Values.server.name }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-serviceaccount.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-serviceaccount.yaml new file mode 100644 index 000000000..6cf017c20 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . 
}} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-statefulset.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-statefulset.yaml new file mode 100644 index 000000000..9369ddf38 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-statefulset.yaml @@ -0,0 +1,224 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: +{{ toYaml .Values.server.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} +{{- end }} +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- toYaml .Values.server.sidecarContainers | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . 
}}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: {} +{{- end }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-vpa.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-vpa.yaml new file mode 100644 index 000000000..8aec16ad5 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/templates/server-vpa.yaml @@ -0,0 +1,26 @@ +{{ if .Values.global.prometheus.enabled }} +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +spec: + targetRef: +{{- if .Values.server.statefulSet.enabled }} + apiVersion: "apps/v1" + kind: StatefulSet +{{- else }} + apiVersion: "extensions/v1beta1" + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/values.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/values.yaml new file mode 100644 index 000000000..3d44e7f59 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/prometheus/values.yaml @@ -0,0 +1,1387 @@ +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. 
+## +serviceAccounts: + alertmanager: + create: true + name: + nodeExporter: + create: true + name: + pushgateway: + create: true + name: + server: + create: true + name: + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + strategy: + type: Recreate + rollingUpdate: null + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: prom/alertmanager + tag: v0.20.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
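(Editorial note: pulling the alertmanager persistence settings above together, a minimal sketch of an override that points the alertmanager at a pre-created claim instead of a dynamically provisioned one; the claim name is illustrative and the PVC must already exist in the release namespace.)

alertmanager:
  persistentVolume:
    enabled: true
    existingClaim: alertmanager-data  # illustrative name of an existing PVC
    mountPath: /data                  # chart default, shown here for clarity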
+ ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.3.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # 
subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.3.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## Please see https://github.com/helm/charts/tree/master/stable/kube-state-metrics for configurable values + ## + enabled: true + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: prom/node-exporter + tag: v0.18.1 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + 
labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + ## Security context to be added to node-exporter pods + ## + securityContext: {} + # runAsUser: 0 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + name: server + sidecarContainers: + strategy: + type: Recreate + rollingUpdate: null + + ## Prometheus server container image + ## + image: + repository: prom/prometheus + tag: v2.17.2 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. 
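(Editorial note: as the comment above describes, the admin API is opt-in and is switched on the same way as web.enable-lifecycle, by listing the flag name under server.extraFlags; a hedged example combining it with the WAL-compression flag mentioned below.)

server:
  extraFlags:
    - web.enable-lifecycle
    - web.enable-admin-api            # exposes admin endpoints such as TSDB series deletion; enable only when needed
    - storage.tsdb.wal-compression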
+ # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: {} + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: {} + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
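(Editorial note: the "-" convention described above is what the volumeClaimTemplates logic in the statefulset template earlier in this diff implements; a sketch of the three outcomes, where fast-ssd is only an example class name.)

server:
  persistentVolume:
    storageClass: "-"         # rendered as storageClassName: "", disabling dynamic provisioning
    # storageClass: fast-ssd  # rendered as storageClassName: "fast-ssd"
    # (key omitted)           # no storageClassName is set, so the cluster's default class is used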
+ ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + readinessProbeInitialDelay: 30 + readinessProbeTimeout: 30 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbeTimeout: 30 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. 
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.0.1 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP 
addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + strategy: + type: Recreate + rollingUpdate: null + + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. 
API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_network_receive_errors_total|container_network_transmit_errors_total|container_network_receive_packets_dropped_total|container_network_transmit_packets_dropped_total|container_memory_usage_bytes|container_cpu_cfs_throttled_periods_total|container_cpu_cfs_periods_total|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_periods_total|container_fs_inodes_free|container_fs_inodes_total|container_fs_usage_bytes|container_fs_limit_bytes|container_cpu_cfs_throttled_periods_total|container_cpu_cfs_periods_total|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_fs_inodes_free|container_fs_inodes_total|container_fs_usage_bytes|container_fs_limit_bytes|container_spec_cpu_shares|container_spec_memory_limit_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|container_fs_reads_bytes_total|container_network_receive_bytes_total|container_fs_writes_bytes_total|container_fs_reads_bytes_total|cadvisor_version_info) + action: keep + - source_labels: [ container ] + target_label: container_name + regex: (.+) + action: replace + - source_labels: [ pod ] + target_label: pod_name + regex: (.+) + action: replace + + # Scrape config for service endpoints. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + metric_relabel_configs: + - source_labels: [ __name__ ] + regex: (container_cpu_allocation|container_cpu_usage_seconds_total|container_fs_limit_bytes|container_memory_allocation_bytes|container_memory_usage_bytes|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_transmit_bytes_total|deployment_match_labels|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_job_status_failed|kube_namespace_annotations|kube_namespace_labels|kube_node_info|kube_node_labels|kube_node_status_capacity_cpu_cores|kube_node_status_capacity_memory_bytes|kube_node_status_condition|kube_persistentvolume_capacity_bytes|kube_persistentvolume_status_phase|kube_persistentvolumeclaim_info|kube_persistentvolumeclaim_resource_requests_storage_bytes|kube_persistentvolumeclaim_resource_requests_storage_bytes|container_memory_allocation_bytes|kube_pod_container_resource_limits_cpu_cores|kube_pod_container_resource_limits_memory_bytes|kube_pod_container_resource_requests_cpu_cores|kube_pod_container_resource_requests_cpu_cores|container_cpu_usage_seconds_total|kube_pod_container_resource_requests_memory_bytes|kube_pod_container_resource_requests_memory_bytes|kube_node_status_capacity_memory_bytes|kube_pod_container_resource_requests|kube_pod_container_status_restarts_total|kube_pod_container_status_running|kube_pod_container_status_terminated_reason|kube_pod_labels|kube_pod_owner|kube_pod_status_phase|kubecost_cluster_memory_working_set_bytes|kubecost_pod_network_egress_bytes_total|node_cpu_hourly_cost|node_cpu_seconds_total|node_disk_reads_completed|node_disk_reads_completed_total|node_disk_writes_completed|node_disk_writes_completed_total|node_filesystem_device_error|node_gpu_hourly_cost|node_memory_Buffers_bytes|node_memory_Cached_bytes|node_memory_MemAvailable_bytes|node_memory_MemFree_bytes|node_memory_MemTotal_bytes|node_network_transmit_bytes_to
tal|node_ram_hourly_cost|pod_pvc_allocation|pv_hourly_cost|service_selector_labels|statefulSet_match_labels|up|kube_node_status_allocatable_cpu_cores|kube_node_status_allocatable_memory_bytes|container_fs_writes_bytes_total|kube_deployment_status_replicas|kube_statefulset_replicas|kube_daemonset_status_desired_number_scheduled|kube_deployment_status_replicas_available|kube_statefulset_status_replicas|kube_daemonset_status_number_ready|kube_deployment_status_replicas|kube_statefulset_replicas|kube_daemonset_status_desired_number_scheduled|kube_replicaset_owner|kube_pod_container_info) + action: keep + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
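(Editorial note: the kubernetes-service-endpoints jobs above, including the slow variant, discover targets purely through Service annotations; a minimal sketch of a Service the default endpoints job would scrape, with name, port, and selector purely illustrative.)

apiVersion: v1
kind: Service
metadata:
  name: my-app-metrics               # illustrative
  annotations:
    prometheus.io/scrape: "true"     # required for the default endpoints job
    prometheus.io/port: "8080"       # only needed if metrics are exposed on a different port
    prometheus.io/path: "/metrics"   # only needed if the path is not /metrics
spec:
  selector:
    app: my-app
  ports:
    - name: http-metrics
      port: 8080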
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/.helmignore b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/Chart.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/Chart.yaml new file mode 100644 index 000000000..bf76cbf27 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: 0.15.0 +description: Thanos is a set of components that can be composed into a highly available + metric system with unlimited storage capacity, which can be added seamlessly on + top of existing Prometheus deployments. 
+icon: https://raw.githubusercontent.com/thanos-io/thanos/master/website/static/Thanos-logo_full.svg +keywords: +- thanos +- prometheus +- metrics +maintainers: +- email: info@banzaicloud.com + name: Banzai Cloud +name: thanos +sources: +- https://github.com/thanos-io/thanos +- https://github.com/banzaicloud/banzai-charts/tree/master/thanos +version: 0.15.0 diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/requirements.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/requirements.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/NOTES.txt b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/_helpers.tpl new file mode 100644 index 000000000..7b5fb57d8 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "thanos.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "thanos.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "thanos.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Create a default fully qualified component name from the full app name and a component name. +We truncate the full name at 63 - 1 (last dash) - len(component name) chars because some Kubernetes name fields are limited to this (by the DNS naming spec) +and we want to make sure that the component is included in the name. +*/}} +{{- define "thanos.componentname" -}} +{{- $global := index . 0 -}} +{{- $component := index . 1 | trimPrefix "-" -}} +{{- printf "%s-%s" (include "thanos.fullname" $global | trunc (sub 62 (len $component) | int) | trimSuffix "-" ) $component | trimSuffix "-" -}} +{{- end -}} + +{{/* + +*/}} +{{- define "thanos.secretname" }} +{{- default (include "thanos.name" .) .Values.storeSecretName }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-deployment.yaml new file mode 100644 index 000000000..e8757af68 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-deployment.yaml @@ -0,0 +1,87 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.bucket.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "thanos.componentname" (list $ "bucket") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . 
}} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end -}} + {{- with .Values.bucket.deploymentAnnotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.bucket.replicaCount | default 1 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.deploymentMatchLabels }}{{ toYaml . | indent 6 }}{{ end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.labels }}{{ toYaml . | indent 8 }}{{ end }} + {{- with .Values.bucket.annotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: thanos-bucket + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: {{- with .Values.bucket.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }} + args: + - "tools" + - "bucket" + - "web" + - "--log.level={{ .Values.bucket.logLevel }}" + - "--http-address=0.0.0.0:{{ .Values.bucket.http.port }}" + - "--objstore.config-file=/etc/config/object-store.yaml" + {{- if .Values.bucket.refresh }} + - "--refresh={{ .Values.bucket.refresh }}" + {{- end }} + {{- if .Values.bucket.timeout }} + - "--timeout={{ .Values.bucket.timeout }}" + {{- end }} + {{- if .Values.bucket.label }} + - "--label={{ .Values.bucket.label }}" + {{- end }} + {{ with .Values.bucket.extraArgs }}{{ toYaml . | nindent 8 }}{{- end }} + ports: + - name: http + containerPort: {{ .Values.bucket.http.port }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + resources: {{ toYaml .Values.bucket.resources | nindent 10 }} + volumes: + - name: config-volume + secret: + secretName: {{ include "thanos.secretname" . }} + {{- with .Values.bucket.securityContext }} + securityContext: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bucket.nodeSelector }} + nodeSelector: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bucket.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bucket.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.bucket.serviceAccount }} + serviceAccountName: "{{ . }}" + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-ingress.yaml new file mode 100644 index 000000000..28b037bda --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-ingress.yaml @@ -0,0 +1,42 @@ +{{ if .Values.global.thanos.enabled }} +{{ if and .Values.bucket.enabled .Values.bucket.http.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "bucket") }} + {{- with .Values.bucket.http.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: bucket + {{- if .Values.bucket.http.ingress.labels }} +{{ toYaml .Values.bucket.http.ingress.labels | indent 4 }} + {{- end }} +spec: + {{- if .Values.bucket.http.ingress.tls }} + tls: + {{- range .Values.bucket.http.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.bucket.http.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $.Values.bucket.http.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "bucket") }} + servicePort: {{ $.Values.bucket.http.port }} + {{- end }} +{{ end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-poddisruptionbudget.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-poddisruptionbudget.yaml new file mode 100644 index 000000000..fb3fd1cb7 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-poddisruptionbudget.yaml @@ -0,0 +1,27 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.bucket.enabled .Values.bucket.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "thanos.componentname" (list $ "bucket") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + {{- if .Values.bucket.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.bucket.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.bucket.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.bucket.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/component: bucket +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-service.yaml new file mode 100644 index 000000000..0d56d8a2b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/bucket-service.yaml @@ -0,0 +1,30 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.bucket.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "bucket") }} + {{- with .Values.bucket.http.service.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.http.service.labels }}{{ toYaml . 
| indent 4 }}{{ end }} +spec: + ports: + - port: {{ .Values.bucket.http.port }} + protocol: TCP + targetPort: http + name: http + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: bucket +{{ with .Values.bucket.http.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} +{{ end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-deployment.yaml new file mode 100644 index 000000000..a5b0347ad --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-deployment.yaml @@ -0,0 +1,108 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.compact.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "thanos.componentname" (list $ "compact") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: compact +{{ with .Values.compact.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end -}} + {{- with .Values.compact.deploymentAnnotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.compact.replicaCount | default 1 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: compact +{{ with .Values.compact.deploymentMatchLabels }}{{ toYaml . | indent 6 }}{{ end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: compact +{{ with .Values.compact.labels }}{{ toYaml . | indent 8 }}{{ end }} + {{- with .Values.compact.annotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.compact.metrics.annotations.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.compact.http.port }}" + {{- end }} + spec: + containers: + - name: thanos-compact + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: {{- with .Values.compact.extraEnv }}{{ toYaml . | nindent 8 }}{{- end }} + args: + - "compact" + - "--log.level={{ .Values.compact.logLevel }}" + - "--http-address=0.0.0.0:{{ .Values.compact.http.port }}" + - "--objstore.config-file=/etc/config/object-store.yaml" + - "--data-dir=/var/thanos/compact" + - "--consistency-delay={{ .Values.compact.consistencyDelay }}" + - "--retention.resolution-raw={{ .Values.compact.retentionResolutionRaw }}" + - "--retention.resolution-5m={{ .Values.compact.retentionResolution5m }}" + - "--retention.resolution-1h={{ .Values.compact.retentionResolution1h }}" + - "--block-sync-concurrency={{ .Values.compact.blockSyncConcurrency }}" + - "--compact.concurrency={{ .Values.compact.compactConcurrency }}" +{{- if .Values.compact.disableDownsampling }} + - "--downsampling.disable" +{{- end }} + - "--wait" +{{ with .Values.compact.extraArgs }}{{ toYaml . 
| indent 8 }}{{- end }} + ports: + - name: http + containerPort: {{ .Values.compact.http.port }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + - name: data-volume + mountPath: /var/thanos/compact + resources: {{ toYaml .Values.compact.resources | nindent 10 }} + volumes: + - name: data-volume + {{- if .Values.compact.dataVolume }} + {{- if .Values.compact.dataVolume.persistentVolumeClaim }} + {{- if .Values.compact.dataVolume.persistentVolumeClaim.claimName }} + persistentVolumeClaim: + claimName: {{ .Values.compact.dataVolume.persistentVolumeClaim.claimName }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: config-volume + secret: + secretName: {{ include "thanos.secretname" . }} + {{- with .Values.compact.securityContext }} + securityContext: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compact.nodeSelector }} + nodeSelector: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compact.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compact.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.compact.serviceAccount }} + serviceAccountName: "{{ . }}" + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-pvc.yaml new file mode 100644 index 000000000..f6b33e491 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-pvc.yaml @@ -0,0 +1,27 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.compact.enabled }} +{{- if .Values.compact.dataVolume -}} +{{- if .Values.compact.dataVolume.persistentVolumeClaim -}} +{{- if .Values.compact.dataVolume.persistentVolumeClaim.claimName -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.compact.dataVolume.persistentVolumeClaim.claimName }} +spec: + accessModes: + - ReadWriteOnce + {{- if .Values.compact.dataVolume.persistentVolumeClaim.storageClass }} + storageClassName: {{ .Values.compact.dataVolume.persistentVolumeClaim.storageClass }} + {{- end }} + resources: + requests: + {{- if .Values.compact.dataVolume.persistentVolumeClaim.storage }} + storage: {{ .Values.compact.dataVolume.persistentVolumeClaim.storage }} + {{- else }} + storage: 100Gi + {{- end }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-service.yaml new file mode 100644 index 000000000..080821849 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-service.yaml @@ -0,0 +1,30 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.compact.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "compact") }} + {{- with .Values.compact.http.service.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: compact +{{ with .Values.compact.http.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + ports: + - port: {{ .Values.compact.http.port }} + protocol: TCP + targetPort: http + name: http + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: compact +{{ with .Values.compact.http.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} +{{ end}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-servicemonitor.yaml new file mode 100644 index 000000000..bc224d802 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/compact-servicemonitor.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.compact.enabled .Values.compact.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "thanos.componentname" (list $ "compact") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: compact +{{ with .Values.compact.metrics.serviceMonitor.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + jobLabel: thanos-compact + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: compact + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: {{ .Values.compact.metrics.serviceMonitor.interval | default "15s" }} + {{- with .Values.compact.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{ toYaml . | nindent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-deployment.yaml new file mode 100644 index 000000000..74e96d09c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-deployment.yaml @@ -0,0 +1,148 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.query.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "thanos.componentname" (list $ "query") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +{{ with .Values.query.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} + {{- with .Values.query.deploymentAnnotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.query.autoscaling.enabled }} + replicas: {{ .Values.query.replicaCount | default 1 }} +{{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query +{{ with .Values.query.deploymentMatchLabels }}{{ toYaml . | indent 6 }}{{ end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query +{{ with .Values.query.labels }}{{ toYaml . | indent 8 }}{{ end }} + {{- with .Values.query.annotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.query.metrics.annotations.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.query.http.port }}" + {{- end }} + spec: + containers: + - name: thanos-query + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "query" + - "--log.level={{ .Values.query.logLevel }}" + - "--grpc-address=0.0.0.0:{{ .Values.query.grpc.port }}" + - "--http-address=0.0.0.0:{{ .Values.query.http.port }}" + - "--query.timeout={{ .Values.query.timeout }}" + - "--query.max-concurrent={{ .Values.query.maxConcurrent }}" + {{- if .Values.query.autoDownsampling }} + - "--query.auto-downsampling" + {{- end }} + {{- if .Values.query.replicaLabel }} + - "--query.replica-label={{ .Values.query.replicaLabel }}" + {{- end }} + {{- if .Values.query.webRoutePrefix }} + - "--web.route-prefix={{ .Values.query.webRoutePrefix }}" + {{- end }} + {{- if .Values.query.webExternalPrefix }} + - "--web.external-prefix={{ .Values.query.webExternalPrefix }}" + {{- end }} + {{- if .Values.query.webPrefixHeader }} + - "--web.prefix-header={{ .Values.query.webPrefixHeader }}" + {{- end }} + {{- if .Values.query.storeDNSResolver }} + - "--store.sd-dns-resolver={{ .Values.query.storeDNSResolver }}" + {{- end }} + {{- if .Values.query.storeDNSDiscovery }} + - "--store=dnssrv+_grpc._tcp.{{ include "thanos.componentname" (list $ "store") }}-grpc.{{ .Release.Namespace }}.svc" + {{- end }} + {{- if .Values.query.sidecarDNSDiscovery }} + - "--store=dnssrv+_grpc._tcp.{{ include "thanos.componentname" (list $ "sidecar") }}-grpc.{{ .Release.Namespace }}.svc" + {{- end }} + {{- range .Values.query.stores }} + - "--store={{ . }}" + {{- end }} + {{- range .Values.query.serviceDiscoveryFiles }} + - "--store.sd-files={{ . }}" + {{- end }} + {{- range .Values.query.serviceDiscoveryFileConfigMaps }} + - "--store.sd-files=/etc/query/{{ . }}/*.yaml" + - "--store.sd-files=/etc/query/{{ . }}/*.yml" + - "--store.sd-files=/etc/query/{{ . }}/*.json" + {{- end }} + {{- if .Values.query.serviceDiscoveryInterval }} + - "--store.sd-interval={{ .Values.query.serviceDiscoveryInterval }}" + {{- end }} + + {{- if .Values.query.extraArgs }} + {{ toYaml .Values.query.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.query.http.port }} + - name: grpc + containerPort: {{ .Values.query.grpc.port }} + resources: + {{ toYaml .Values.query.resources | nindent 10 }} + env: + {{- toYaml .Values.query.extraEnv | nindent 10 }} + volumeMounts: + {{- range .Values.query.serviceDiscoveryFileConfigMaps }} + - mountPath: /etc/query/{{ . }} + name: {{ . }} + {{- end }} + {{- if .Values.query.certSecretName }} + - mountPath: /etc/certs + name: {{ .Values.query.certSecretName }} + readOnly: true + {{- end }} + livenessProbe: + httpGet: + path: /-/healthy + port: http + volumes: + {{- range .Values.query.serviceDiscoveryFileConfigMaps }} + - name: {{ . }} + configMap: + defaultMode: 420 + name: {{ . 
}} + {{- end }} + {{- if .Values.query.certSecretName }} + - name: {{ .Values.query.certSecretName }} + secret: + defaultMode: 420 + secretName: {{ .Values.query.certSecretName }} + {{- end }} + {{- with .Values.query.securityContext }} + securityContext: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.nodeSelector }} + nodeSelector: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.serviceAccount }} + serviceAccountName: "{{ . }}" + {{- end }} +{{ end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-deployment.yaml new file mode 100644 index 000000000..05c32682d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-deployment.yaml @@ -0,0 +1,122 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.queryFrontend.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} + {{- with .Values.queryFrontend.deploymentAnnotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if not .Values.queryFrontend.autoscaling.enabled }} + replicas: {{ .Values.queryFrontend.replicaCount | default 1 }} +{{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.deploymentMatchLabels }}{{ toYaml . | indent 6 }}{{ end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.labels }}{{ toYaml . | indent 8 }}{{ end }} + {{- with .Values.queryFrontend.annotations }} + annotations: {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.queryFrontend.metrics.annotations.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.queryFrontend.http.port }}" + {{- end }} + spec: + containers: + - name: thanos-query-frontend + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "query-frontend" + - "--log.level={{ .Values.queryFrontend.logLevel }}" + - "--http-address=0.0.0.0:{{ .Values.queryFrontend.http.port }}" + - "--query-frontend.downstream-url=http://{{ include "thanos.componentname" (list $ "query") }}-http.{{ .Release.Namespace }}:{{ .Values.query.http.port }}" + - "--query-range.split-interval={{ .Values.queryFrontend.splitInterval }}" + - "--query-range.max-retries-per-request={{ .Values.queryFrontend.maxRetriesPerRequest }}" + - "--query-range.max-query-length={{ .Values.queryFrontend.maxQueryLength }}" + - "--query-range.max-query-parallelism={{ .Values.queryFrontend.maxQueryParallelism }}" + - "--query-range.response-cache-max-freshness={{ .Values.queryFrontend.responseCacheMaxFreshness }}" + {{- if .Values.queryFrontend.responseCache.enabled }} + {{- with .Values.queryFrontend.responseCache }} + - |- + --query-range.response-cache-config= + config: + max_size: {{ quote .maxSize }} + max_size_items: {{ .maxSizeItems }} + validity: {{ quote .validity }} + type: "in-memory" + {{- end }} + {{- else if .Values.queryFrontend.responseCacheConfigFile }} + - "--query-range.response-cache-config-file={{ .Values.queryFrontend.responseCacheConfigFile }}" + {{- else if .Values.queryFrontend.responseCacheConfig }} + - |- + --query-range.response-cache-config={{ toYaml .Values.queryFrontend.responseCacheConfig | nindent 12 }} + {{- end }} + {{- if .Values.queryFrontend.compressResponses }} + - "--query-frontend.compress-responses" + {{- end }} + {{- if .Values.queryFrontend.partialResponse }} + - "--query-range.partial-response" + {{- end }} + {{- if .Values.queryFrontend.extraArgs }} + {{ toYaml .Values.queryFrontend.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.queryFrontend.http.port }} + resources: + {{ toYaml .Values.queryFrontend.resources | nindent 10 }} + env: + {{- toYaml .Values.queryFrontend.extraEnv | nindent 10 }} + volumeMounts: + {{- if .Values.queryFrontend.certSecretName }} + - mountPath: /etc/certs + name: {{ .Values.queryFrontend.certSecretName }} + readOnly: true + {{- end }} + livenessProbe: + httpGet: + path: /-/healthy + port: http + volumes: + {{- if .Values.queryFrontend.certSecretName }} + - name: {{ .Values.queryFrontend.certSecretName }} + secret: + defaultMode: 420 + secretName: {{ .Values.queryFrontend.certSecretName }} + {{- end }} + {{- with .Values.queryFrontend.securityContext }} + securityContext: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.nodeSelector }} + nodeSelector: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.queryFrontend.serviceAccount }} + serviceAccountName: "{{ . 
}}" + {{- end }} +{{ end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-horizontalpodautoscaler.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-horizontalpodautoscaler.yaml new file mode 100644 index 000000000..3d08e459b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-horizontalpodautoscaler.yaml @@ -0,0 +1,37 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.queryFrontend.enabled }} +{{- if .Values.queryFrontend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "thanos.componentname" (list $ "query-frontend") }} + minReplicas: {{ .Values.queryFrontend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.queryFrontend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.queryFrontend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.queryFrontend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-ingress.yml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-ingress.yml new file mode 100644 index 000000000..5423cecf2 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-ingress.yml @@ -0,0 +1,45 @@ +--- +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.queryFrontend.enabled .Values.queryFrontend.http.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }}-http + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend + {{- if .Values.queryFrontend.http.ingress.labels }} + {{ toYaml .Values.queryFrontend.http.ingress.labels | indent 4 }} + {{- end }} + {{- with .Values.queryFrontend.http.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.queryFrontend.http.ingress.tls }} + tls: + {{- range .Values.queryFrontend.http.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + {{- if .secretName }} + secretName: {{ .secretName }} + {{- end}} + {{- end }} + {{- end }} + rules: + {{- range .Values.queryFrontend.http.ingress.hosts }} + - host: {{ . 
}} + http: + paths: + - path: {{ $.Values.queryFrontend.http.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "query-frontend") }}-http + servicePort: {{ $.Values.queryFrontend.http.port }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-poddisruptionbudget.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-poddisruptionbudget.yaml new file mode 100644 index 000000000..5cfef4756 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-poddisruptionbudget.yaml @@ -0,0 +1,27 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.queryFrontend.enabled .Values.queryFrontend.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + {{- if .Values.queryFrontend.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.queryFrontend.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.queryFrontend.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.queryFrontend.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/component: query-frontend +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-service.yaml new file mode 100644 index 000000000..2521e898a --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-service.yaml @@ -0,0 +1,34 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.queryFrontend.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }}-http + {{- with .Values.queryFrontend.http.service.annotations }} + annotations: {{ toYaml .| nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.http.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: {{ .Values.queryFrontend.http.service.type }} + {{- if .Values.queryFrontend.http.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.queryFrontend.http.externalTrafficPolicy }} + {{- end }} + ports: + - port: {{ .Values.queryFrontend.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.http.service.matchLabels }}{{ toYaml . 
| indent 4 }}{{ end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-servicemonitor.yaml new file mode 100644 index 000000000..004367519 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-frontend-servicemonitor.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.queryFrontend.enabled .Values.queryFrontend.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "thanos.componentname" (list $ "query-frontend") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query-frontend +{{ with .Values.queryFrontend.metrics.serviceMonitor.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + jobLabel: thanos-query + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query-frontend + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: {{ .Values.queryFrontend.metrics.serviceMonitor.interval | default "15s" }} + {{- with .Values.queryFrontend.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{ toYaml . | nindent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-horizontalpodautoscaler.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-horizontalpodautoscaler.yaml new file mode 100644 index 000000000..9b38473d8 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-horizontalpodautoscaler.yaml @@ -0,0 +1,37 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.query.enabled }} +{{- if .Values.query.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "thanos.componentname" (list $ "query") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "thanos.componentname" (list $ "query") }} + minReplicas: {{ .Values.query.autoscaling.minReplicas }} + maxReplicas: {{ .Values.query.autoscaling.maxReplicas }} + metrics: +{{- with .Values.query.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.query.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . 
}} +{{- end }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-ingress.yml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-ingress.yml new file mode 100644 index 000000000..cfda4a0e3 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-ingress.yml @@ -0,0 +1,89 @@ +--- +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.query.enabled .Values.query.http.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "query") }}-http + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query + {{- if .Values.query.http.ingress.labels }} + {{ toYaml .Values.query.http.ingress.labels | indent 4 }} + {{- end }} + {{- with .Values.query.http.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.query.http.ingress.tls }} + tls: + {{- range .Values.query.http.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + {{- if .secretName }} + secretName: {{ .secretName }} + {{- end}} + {{- end }} + {{- end }} + rules: + {{- range .Values.query.http.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $.Values.query.http.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "query") }}-http + servicePort: {{ $.Values.query.http.port }} + {{- end }} +{{- end }} + +{{- if and .Values.query.enabled .Values.query.grpc.ingress.enabled }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "query") }}-grpc + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query + {{- if .Values.query.grpc.ingress.labels }} + {{ toYaml .Values.grpc.ingress.labels | indent 4 }} + {{- end }} + {{- with .Values.query.grpc.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.query.grpc.ingress.tls }} + tls: + {{- range .Values.query.grpc.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + {{- if .secretName }} + secretName: {{ .secretName }} + {{- end}} + {{- end }} + {{- end }} + rules: + {{- range .Values.query.grpc.ingress.hosts }} + - host: {{ . 
}} + http: + paths: + - path: {{ $.Values.query.grpc.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "query") }}-grpc + servicePort: {{ $.Values.query.http.port }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-poddisruptionbudget.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-poddisruptionbudget.yaml new file mode 100644 index 000000000..efc2c817c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-poddisruptionbudget.yaml @@ -0,0 +1,27 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.query.enabled .Values.query.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "thanos.componentname" (list $ "query") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +{{ with .Values.query.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + {{- if .Values.query.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.query.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.query.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.query.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/component: query +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-service.yaml new file mode 100644 index 000000000..89178a4e1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-service.yaml @@ -0,0 +1,65 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.query.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "query") }}-grpc + {{- with .Values.query.grpc.service.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +{{ with .Values.query.grpc.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.query.grpc.port }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query +{{ with .Values.query.grpc.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} + +--- + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "query") }}-http + {{- with .Values.query.http.service.annotations }} + annotations: {{ toYaml .| nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +{{ with .Values.query.http.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: {{ .Values.query.http.service.type }} + {{- if .Values.query.http.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.query.http.externalTrafficPolicy }} + {{- end }} + ports: + - port: {{ .Values.query.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query +{{ with .Values.query.http.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-servicemonitor.yaml new file mode 100644 index 000000000..673445428 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/query-servicemonitor.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.query.enabled .Values.query.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "thanos.componentname" (list $ "query") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: query +{{ with .Values.query.metrics.serviceMonitor.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + jobLabel: thanos-query + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: query + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: {{ .Values.query.metrics.serviceMonitor.interval | default "15s" }} + {{- with .Values.query.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{ toYaml . | nindent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-service.yaml new file mode 100644 index 000000000..021bdfaf9 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-service.yaml @@ -0,0 +1,61 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.sidecar.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "sidecar") }}-grpc + {{- with .Values.sidecar.grpc.service.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: sidecar +{{ with .Values.sidecar.grpc.service.labels }}{{ toYaml . 
| indent 4 }}{{ end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.sidecar.grpc.port }} + protocol: TCP + targetPort: grpc + name: grpc + selector: + app: prometheus +{{ with .Values.sidecar.grpc.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} + +--- + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "sidecar") }}-http + {{- with .Values.sidecar.http.service.annotations }} + annotations: {{ toYaml .| nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: sidecar +{{ with .Values.store.http.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: {{ .Values.sidecar.http.service.type }} + {{- if .Values.sidecar.http.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.sidecar.http.externalTrafficPolicy }} + {{- end }} + ports: + - port: {{ .Values.sidecar.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app: prometheus +{{ with .Values.sidecar.http.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-servicemonitor.yaml new file mode 100644 index 000000000..6271a23ca --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/sidecar-servicemonitor.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.sidecar.enabled .Values.sidecar.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "thanos.componentname" (list $ "sidecar") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: sidecar +{{ with .Values.sidecar.metrics.serviceMonitor.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + jobLabel: thanos-sidecar + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: sidecar + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: {{ .Values.sidecar.metrics.serviceMonitor.interval | default "15s" }} + {{- with .Values.sidecar.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{ toYaml . | nindent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-deployment.yaml new file mode 100644 index 000000000..d8f0d3b1b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-deployment.yaml @@ -0,0 +1,121 @@ +{{ if .Values.global.thanos.enabled }} +{{ if .Values.store.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "thanos.componentname" (list $ "store") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . 
}} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store +{{ with .Values.store.deploymentLabels }}{{ toYaml . | indent 4 }}{{ end }} + {{- with .Values.store.deploymentAnnotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.store.replicaCount | default 1 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: store +{{ with .Values.store.deploymentMatchLabels }}{{ toYaml . | indent 6 }}{{ end }} + template: + metadata: + labels: +{{ with .Values.store.labels }}{{ toYaml . | indent 8 }}{{ end }} + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: store + {{- with .Values.store.annotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.store.metrics.annotations.enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.store.http.port }}" + {{- end }} + spec: + containers: + - name: thanos-store + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - "store" + - "--data-dir=/var/thanos/store" + - "--log.level={{ .Values.store.logLevel }}" + - "--http-address=0.0.0.0:{{ .Values.store.http.port }}" + - "--grpc-address=0.0.0.0:{{ .Values.store.grpc.port }}" + - "--objstore.config-file=/etc/config/object-store.yaml" + {{- if .Values.store.indexCacheSize }} + - "--index-cache-size={{ .Values.store.indexCacheSize }}" + {{- end }} + {{- if .Values.store.chunkPoolSize }} + - "--chunk-pool-size={{ .Values.store.chunkPoolSize }}" + {{- end }} + {{- if .Values.store.grpcSeriesSampleLimit }} + - "--store.grpc.series-sample-limit={{ .Values.store.grpcSeriesSampleLimit }}" + {{- end }} + {{- if .Values.store.grpcSeriesMaxConcurrency }} + - "--store.grpc.series-max-concurrency={{ .Values.store.grpcSeriesMaxConcurrency }}" + {{- end }} + {{- if .Values.store.syncBlockDuration }} + - "--sync-block-duration={{ .Values.store.syncBlockDuration }}" + {{- end }} + {{- if .Values.store.blockSyncConcurrency }} + - "--block-sync-concurrency={{ .Values.store.blockSyncConcurrency }}" + {{- end }} + {{- if .Values.store.extraArgs }} + {{ toYaml .Values.store.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: http + containerPort: {{ .Values.store.http.port }} + - name: grpc + containerPort: {{ .Values.store.grpc.port }} + env: + {{- toYaml .Values.store.extraEnv | nindent 10 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + - name: data + mountPath: /var/thanos/store + {{- if .Values.store.certSecretName }} + - mountPath: /etc/certs + name: {{ .Values.store.certSecretName }} + readOnly: true + {{- end }} + resources: + {{ toYaml .Values.store.resources | nindent 10 }} + volumes: + - name: data + emptyDir: {} + - name: config-volume + secret: + secretName: {{ include "thanos.secretname" . }} + {{- if .Values.store.certSecretName }} + - name: {{ .Values.store.certSecretName }} + secret: + defaultMode: 420 + secretName: {{ .Values.store.certSecretName }} + {{- end }} + {{- with .Values.store.securityContext }} + securityContext: {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.store.nodeSelector }} + nodeSelector: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.store.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.store.tolerations }} + tolerations: {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.store.serviceAccount }} + serviceAccountName: "{{ . }}" + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-ingress.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-ingress.yaml new file mode 100644 index 000000000..4d18c019e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-ingress.yaml @@ -0,0 +1,85 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.store.enabled .Values.store.http.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "store") }}-http + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store + {{- if .Values.store.http.ingress.labels }} + {{ toYaml .Values.store.http.ingress.labels | indent 4 }} + {{- end }} + {{- with .Values.store.http.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.store.http.ingress.tls }} + tls: + {{- range .Values.store.http.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.store.http.ingress.hosts }} + - host: {{ . }} + http: + paths: + - path: {{ $.Values.store.http.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "store") }}-http + servicePort: {{ $.Values.store.http.port }} + {{- end }} +{{- end }} + +--- + + {{- if and .Values.store.enabled .Values.store.grpc.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "thanos.componentname" (list $ "store") }}-grpc + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store + {{- if .Values.store.grpc.ingress.labels }} + {{ toYaml .Values.grpc.ingress.labels | indent 4 }} + {{- end }} + {{- with .Values.store.grpc.ingress.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.store.grpc.ingress.tls }} + tls: + {{- range .Values.store.grpc.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.store.grpc.ingress.hosts }} + - host: {{ . 
}} + http: + paths: + - path: {{ $.Values.store.grpc.ingress.path }} + backend: + serviceName: {{ include "thanos.componentname" (list $ "store") }}-grpc + servicePort: {{ $.Values.store.http.port }} + {{- end }} +{{- end }} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-service.yaml new file mode 100644 index 000000000..ce5894fa3 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-service.yaml @@ -0,0 +1,65 @@ +{{ if .Values.global.thanos.enabled }} +{{- if .Values.store.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "store") }}-grpc + {{- with .Values.store.grpc.service.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store +{{ with .Values.store.grpc.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - port: {{ .Values.store.grpc.port }} + targetPort: grpc + protocol: TCP + name: grpc + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: store +{{ with .Values.store.grpc.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} + +--- + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "thanos.componentname" (list $ "store") }}-http + {{- with .Values.store.http.service.annotations }} + annotations: {{ toYaml .| nindent 4 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ $.Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store +{{ with .Values.store.http.service.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + type: {{ .Values.store.http.service.type }} + {{- if .Values.store.http.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.store.http.externalTrafficPolicy }} + {{- end }} + ports: + - port: {{ .Values.store.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: store +{{ with .Values.store.http.service.matchLabels }}{{ toYaml . | indent 4 }}{{ end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-servicemonitor.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-servicemonitor.yaml new file mode 100644 index 000000000..c181c6416 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/templates/store-servicemonitor.yaml @@ -0,0 +1,32 @@ +{{ if .Values.global.thanos.enabled }} +{{- if and .Values.store.enabled .Values.store.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "thanos.componentname" (list $ "store") }} + labels: + app.kubernetes.io/name: {{ include "thanos.name" . 
}} + helm.sh/chart: {{ include "thanos.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/version: {{ .Chart.AppVersion | replace "+" "_" }} + app.kubernetes.io/component: store +{{ with .Values.store.metrics.serviceMonitor.labels }}{{ toYaml . | indent 4 }}{{ end }} +spec: + jobLabel: thanos-store + selector: + matchLabels: + app.kubernetes.io/name: {{ include "thanos.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/component: store + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + endpoints: + - port: http + interval: {{ .Values.store.metrics.serviceMonitor.interval | default "15s" }} + {{- with .Values.store.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{ toYaml . | nindent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/values.yaml b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/values.yaml new file mode 100644 index 000000000..0a11a2627 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/charts/thanos/values.yaml @@ -0,0 +1,739 @@ +image: + repository: thanosio/thanos + tag: v0.15.0 + pullPolicy: IfNotPresent + +store: + enabled: true + # Maximum size of items held in the index cache. + indexCacheSize: 250MB + # Maximum size of concurrently allocatable bytes for chunks. + chunkPoolSize: 2GB + # Maximum amount of samples returned via a single series call. 0 means no limit. + # NOTE: for efficiency we take 120 as the number of samples in chunk (it cannot be bigger than that), + # so the actual number of samples might be lower, even though the maximum could be hit. + grpcSeriesSampleLimit: 0 + # Maximum number of concurrent Series calls. + grpcSeriesMaxConcurrency: 20 + # Repeat interval for syncing the blocks between local and remote view. + syncBlockDuration: 3m + # Number of goroutines to use when syncing blocks from object storage. + blockSyncConcurrency: 20 + # Log filtering level. 
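As a point of reference for the store tuning knobs above, a minimal values override sketch is shown here; it assumes the usual Helm subchart nesting (the Thanos values sitting under a thanos: key in the parent cost-analyzer values, with global.thanos.enabled gating every template in this chart), and the sizes are illustrative only:

  global:
    thanos:
      enabled: true              # gate checked by every template via .Values.global.thanos.enabled
  thanos:                        # assumed subchart key; depends on the dependency alias
    store:
      indexCacheSize: 500MB      # illustrative: larger index cache for bigger buckets
      chunkPoolSize: 4GB         # illustrative: more room for concurrent chunk allocations
      blockSyncConcurrency: 20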
+ logLevel: info + # Add extra environment variables to store + extraEnv: [] + # - name: ENV + # value: value + # + # Add extra arguments to the store service + extraArgs: [] + # - "--extraargs=extravalue" + # + # Number of replicas running from store component + replicaCount: 1 + # Extra labels for store pod template + labels: {} + # cluster: example + # + # Extra annotations for store pod template + annotations: {} + # example.com: default + # + # Add extra labels to store deployment + deploymentLabels: {} + # extraLabel: extraLabelValue + # + # Add extra annotations to store deployment + deploymentAnnotations: {} + # extraAnnotation: extraAnnotationValue + # + # Add extra selector matchLabels to store deployment + deploymentMatchLabels: {} + # Enable metrics collecting for store service + metrics: + # This is the Prometheus annotation type scraping configuration + annotations: + enabled: false + # Enable ServiceMonitor https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + # Labels for prometheus-operator to find servicemonitor + labels: {} + # The grpc endpoint to communicate with other components + grpc: + # grpc listen port number + port: 10901 + # Service definition for query grpc service + service: + # Annotations to query grpc service + annotations: {} + # Labels to query grpc service + labels: {} + matchLabels: {} + # Set up ingress for the grpc service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # The http endpoint to communicate with other components + http: + # http listen port number + port: 10902 + # Service definition for query http service + service: + type: ClusterIP + # Annotations to query http service + annotations: {} + # Labels to query http service + labels: {} + matchLabels: {} + # Set up ingress for the http service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # Optional securityContext + securityContext: {} + resources: {} + # limits: + # cpu: 2000m + # memory: 16Gi + # requests: + # cpu: 1000m + # memory: 4Gi + # + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # + # Node labels for store pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + # + # Pod affinity + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + serviceAccount: "" + +# Query Frontend Component +queryFrontend: + enabled: true + + # Split queries by an interval and execute in parallel, 0 disables it. + splitInterval: 24h + + # Maximum number of retries for a single request; beyond this, the downstream error is returned. + maxRetriesPerRequest: 5 + + # Limit the query time range (end - start time) in the query-frontend, 0 disables it. 
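For context on the --query-range.response-cache-config flag rendered by the query-frontend deployment template earlier in this diff, the built-in in-memory cache is driven entirely by the responseCache block; a hedged sketch with illustrative sizes:

  queryFrontend:
    enabled: true
    splitInterval: 24h         # parallelise long range queries by day
    responseCache:
      enabled: true
      maxSize: 512MB           # total cache size (illustrative)
      maxSizeItems: 0          # 0 disables the item-count limit
      validity: 10m            # how long cached responses are considered fresh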
+ maxQueryLength: 0 + + # Maximum number of queries will be scheduled in parallel by the frontend.\ + maxQueryParallelism: 14 + + # Most recent allowed cacheable result, to prevent caching very recent results that might still be in flux. + responseCacheMaxFreshness: 1m + + # Path to YAML file that contains response cache configuration. + # responseCacheConfigFile: + + # Response Cache Configuration + responseCache: + enabled: false + maxSize: 512MB + maxSizeItems: 0 + validity: 10m + + # Response cache configuration content + # responseCacheConfig: + + # Enable partial response for queries if no partial_response param is specified. --no-query-range.partial-response for disabling. + # partialResponse: false + + # Compress HTTP responses. + compressResponses: true + + logLevel: info + # Add extra environment variables to query + extraEnv: [] + # - name: ENV + # value: value + # + # Add extra arguments to the query service + extraArgs: [] + # - "--extraargs=extravalue" + # + # Number of replicas running from query component + replicaCount: 1 + # Enable HPA for query component + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + # Enable podDisruptionBudget for query component + podDisruptionBudget: + enabled: false + # minAvailable and maxUnavailable can't be used simultaneous. Choose one. + minAvailable: 1 + # maxUnavailable: 50% + + serviceAccount: "" + + # The http endpoint to communicate with other components + http: + # http listen port number + port: 10902 + # Service definition for query http service + service: + type: ClusterIP + # Annotations to query http service + annotations: {} + # Labels to query http service + labels: {} + matchLabels: {} + # Set up ingress for the http service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + certSecretName: "" + # Extra labels for query pod template + labels: {} + # cluster: example + # + # Extra annotations for query pod template + annotations: {} + # example.com: default + # + # Add extra labels to query deployment + deploymentLabels: {} + # extraLabel: extraLabelValue + # + # Add extra annotations to query deployment + deploymentAnnotations: {} + # extraAnnotation: extraAnnotationValue + # + # Add extra selector matchLabels to query deployment + deploymentMatchLabels: {} + + # Enable metrics collecting for query service + metrics: + # This is the Prometheus annotation type scraping configuration + annotations: + enabled: false + # Enable ServiceMonitor https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + # Labels for prometheus-operator to find servicemonitor + labels: {} + + # Optional securityContext + securityContext: {} + resources: {} + # limits: + # cpu: 2000m + # memory: 16Gi + # requests: + # cpu: 1000m + # memory: 4Gi + # + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # + # Node labels for compact pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + # + # Pod affinity + # Ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + +query: + enabled: true + # Label to treat as a replica indicator along which data is deduplicated. + # Still you will be able to query without deduplication using 'dedup=false' parameter. + replicaLabel: "" + # Prefix for API and UI endpoints. This allows thanos UI to be served on a sub-path. + # This option is analogous to --web.route-prefix of Promethus. + webRoutePrefix: "" + # Static prefix for all HTML links and redirect + # URLs in the UI query web interface. Actual + # endpoints are still served on / or the + # web.route-prefix. This allows thanos UI to be + # served behind a reverse proxy that strips a URL + # sub-path. + webExternalPrefix: "" + # Name of HTTP request header used for dynamic prefixing of UI links and redirects. + # This option is ignored if web.external-prefix argument is set. Security risk: enable this + # option only if a reverse proxy in front of thanos is resetting the header. The --web.prefix-header=X-Forwarded-Prefix option + # can be useful, for example, if Thanos UI is served via Traefik reverse proxy with PathPrefixStrip option enabled, which sends the + # stripped prefix value in X-Forwarded-Prefix header. This allows thanos UI to be served on a sub-path + webPrefixHeader: "" + # Maximum time to process query by query node. + timeout: 2m + # Maximum number of queries processed concurrently by query node. + maxConcurrent: 16 + # Maximum number of select requests made concurrently per a query. + maxConcurrentSelect: 4 + # Enable automatic adjustment (step / 5) to what source of data should be used in store gateways + # if no max_source_resolution param is specified. + autoDownsampling: false + # https://github.com/improbable-eng/thanos/issues/1015 + storeDNSResolver: miekgdns + # Enable DNS discovery for stores + storeDNSDiscovery: true + # Enable DNS discovery for sidecars (this is for the chart built-in sidecar service) + sidecarDNSDiscovery: true + # Addresses of statically configured store API servers (repeatable). + # The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect store API servers through respective DNS lookups. + stores: [] + # - "dnssrv+_grpc._tcp...svc" + # + # Path to files that contains addresses of store API servers. The path can be a glob pattern (repeatable). + serviceDiscoveryFiles: [] + # Names of configmaps that contain addresses of store API servers, used for file service discovery. + serviceDiscoveryFileConfigMaps: [] + # Refresh interval to re-read file SD files. It is used as a resync fallback. + serviceDiscoveryInterval: 5m + # Log filtering level. + logLevel: info + # Add extra environment variables to query + extraEnv: [] + # - name: ENV + # value: value + # + # Add extra arguments to the query service + extraArgs: [] + # - "--extraargs=extravalue" + # + # Number of replicas running from query component + replicaCount: 1 + # Enable HPA for query component + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + # Enable podDisruptionBudget for query component + podDisruptionBudget: + enabled: false + # minAvailable and maxUnavailable can't be used simultaneous. Choose one. 
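To make the store-discovery comments above concrete, this sketch points the querier at one extra, statically configured Store API endpoint and deduplicates across Prometheus replicas; the label value and DNS name are placeholders, not defaults from this chart:

  query:
    replicaLabel: prometheus_replica     # series differing only by this label are deduplicated
    storeDNSDiscovery: true              # keep DNS SRV discovery of the bundled store component
    stores:
      - "dnssrv+_grpc._tcp.thanos-store-grpc.monitoring.svc"   # hypothetical extra endpoint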
+ minAvailable: 1 + # maxUnavailable: 50% + + # The http endpoint to communicate with other components + http: + # http listen port number + port: 10902 + # Service definition for query http service + service: + type: ClusterIP + # Annotations to query http service + annotations: {} + # Labels to query http service + labels: {} + matchLabels: {} + # Set up ingress for the http service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + certSecretName: "" + # Extra labels for query pod template + labels: {} + # cluster: example + # + # Extra annotations for query pod template + annotations: {} + # example.com: default + # + # Add extra labels to query deployment + deploymentLabels: {} + # extraLabel: extraLabelValue + # + # Add extra annotations to query deployment + deploymentAnnotations: {} + # extraAnnotation: extraAnnotationValue + # + # Add extra selector matchLabels to query deployment + deploymentMatchLabels: {} + + # Enable metrics collecting for query service + metrics: + # This is the Prometheus annotation type scraping configuration + annotations: + enabled: false + # Enable ServiceMonitor https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + # Labels for prometheus-operator to find servicemonitor + labels: {} + + # Optional securityContext + securityContext: {} + resources: {} + # limits: + # cpu: 2000m + # memory: 16Gi + # requests: + # cpu: 1000m + # memory: 4Gi + # + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # + # Node labels for compact pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + # + # Pod affinity + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + + # The grpc endpoint to communicate with other components + grpc: + # grpc listen port number + port: 10901 + # Service definition for query grpc service + service: + # Annotations to query grpc service + annotations: {} + # labels to query grpc service + labels: {} + matchLabels: {} + # Set up ingress for the grpc service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + serviceAccount: "" + +compact: + enabled: true + # Minimum age of fresh (non-compacted) blocks before they are being processed. + # Malformed blocks older than the maximum of consistency-delay and 30m0s will be removed. + consistencyDelay: 30m + # How long to retain raw samples in bucket. Setting this to 0d will retain samples of this resolution forever + retentionResolutionRaw: 1825d + # How long to retain samples of resolution 1 (5 minutes) in bucket. Setting this to 0d will retain samples of this resolution forever + retentionResolution5m: 1825d + # How long to retain samples of resolution 2 (1 hour) in bucket. Setting this to 0d will retain samples of this resolution forever + retentionResolution1h: 1825d + # Number of goroutines to use when compacting groups. 
+ compactConcurrency: 1 + # Number of goroutines to use when syncing block metadata from object storage. + blockSyncConcurrency: 20 + # Disables Downsampling data + disableDownsampling: false + # Log filtering level. + logLevel: info + # Compact service listening http port + http: + port: 10902 + service: + labels: {} + # Add extra environment variables to compact + extraEnv: + # - name: ENV + # value: value + # + # Add extra arguments to the compact service + extraArgs: + # - "--extraargs=extravalue" + # + # Data volume for the compactor to store temporary data defaults to emptyDir + # dataVolume: + # persistentVolumeClaim: + # claimName: compact-data-volume + # storage: 100Gi + # Extra labels for compact pod template + labels: {} + # cluster: example + # + # Extra annotations for compact pod template + annotations: {} + # example.com: default + # + # Add extra labels to compact deployment + deploymentLabels: {} + # extraLabel: extraLabelValue + # + # Add extra annotations to compact deployment + deploymentAnnotations: {} + # extraAnnotation: extraAnnotationValue + # + # Add extra selector matchLabels to compact deployment + deploymentMatchLabels: {} + # + # Enable metrics collecting for compact service + metrics: + # This is the Prometheus annotation type scraping configuration + annotations: + enabled: false + # Enable ServiceMonitor https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + # Labels for prometheus-operator to find servicemonitor + labels: {} + serviceAccount: "" + + # Optional securityContext + securityContext: {} + resources: {} + # limits: + # cpu: 2000m + # memory: 16Gi + # requests: + # cpu: 1000m + # memory: 4Gi + # + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # + # Node labels for compact pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + # + # Pod affinity + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + +bucket: + enabled: true + # Number of replicas running from bucket component + replicaCount: 1 + # Log filtering level. 
+ logLevel: info + # Refresh interval to download metadata from remote storage + refresh: 30m + # Timeout to download metadata from remote storage + timeout: 5m + # Prometheus label to use as timeline title + label: "" + # The http endpoint to communicate with other components + http: + # http listen port number + port: 8080 + # Service definition for bucket http service + service: + type: ClusterIP + # Annotations to bucket http service + annotations: {} + # Labels to bucket http service + labels: {} + matchLabels: {} + # Set up ingress for the http service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # Add extra environment variables to bucket + extraEnv: + # - name: ENV + # value: value + # + # Add extra arguments to the bucket service + extraArgs: + # - "--extraargs=extravalue" + # + # Extra labels for bucket pod template + labels: {} + # cluster: example + # + # Extra annotations for bucket pod template + annotations: {} + # example.com: default + # + # Add extra labels to bucket deployment + deploymentLabels: {} + # extraLabel: extraLabelValue + # + # Add extra annotations to bucket deployment + deploymentAnnotations: {} + # + # Add extra selector matchLabels to bucket deployment + deploymentMatchLabels: {} + + # Enable podDisruptionBudget for bucket component + podDisruptionBudget: + enabled: false + # minAvailable and maxUnavailable can't be used simultaneously. Choose one. + minAvailable: 1 + # maxUnavailable: 50% + + # Optional securityContext + securityContext: {} + resources: {} + # limits: + # cpu: 2000m + # memory: 16Gi + # requests: + # cpu: 1000m + # memory: 4Gi + # + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # + # Node labels for bucket pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + # + # Pod affinity + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + affinity: {} + serviceAccount: "" + +sidecar: + # NOTE: These are only the service references for the sidecar + enabled: true + # Enable metrics collecting for sidecar service + metrics: + # Enable ServiceMonitor https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + # Labels for prometheus-operator to find servicemonitor + labels: {} + # The grpc endpoint to communicate with other components + grpc: + # grpc listen port number + port: 10901 + # Service definition for sidecar grpc service + service: + # Annotations to sidecar grpc service + annotations: {} + # Labels to sidecar grpc service + labels: {} + matchLabels: {} + # Set up ingress for the grpc service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # The http endpoint to communicate with other components + http: + # http listen port number + port: 10902 + # Service definition for sidecar http service + service: + type: ClusterIP + # Annotations to sidecar http service + annotations: {} + # Labels to sidecar http
service + labels: {} + matchLabels: {} + # Set up ingress for the http service + ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: "/" + hosts: + - "/" + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +storeSecretName: diff --git a/charts/kubecost/cost-analyzer/1.70.000/cluster-metrics.json b/charts/kubecost/cost-analyzer/1.70.000/cluster-metrics.json new file mode 100644 index 000000000..0e6638252 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/cluster-metrics.json @@ -0,0 +1,1663 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Cost metrics from the Kubecost product", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 7, + "iteration": 1558062099204, + "links": [], + "panels": [ + { + "content": "Note: this dashboard requires Kubecost metrics to be available in your Prometheus deployment. [Learn more](https://github.com/kubecost/cost-model/blob/master/PROMETHEUS.md)", + "gridPos": { + "h": 2, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 27, + "links": [], + "mode": "markdown", + "title": "", + "transparent": true, + "type": "text" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Monthly run rate of CPU + GPU costs based on currently provisioned resources.", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 2 + }, + "hideTimeOverride": true, + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(\n avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100) +\n avg(node_gpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n)", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "CPU Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Monthly run rate of memory costs based on currently provisioned expenses.", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + 
"minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 2 + }, + "hideTimeOverride": true, + "id": 3, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(\n avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n)", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Memory Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Monthly run rate of attached storage and PV costs based on currently provisioned resources.", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 2 + }, + "hideTimeOverride": true, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(avg(pv_hourly_cost) by (persistentvolume) * 730 * avg(kube_persistentvolume_capacity_bytes) by (persistentvolume) / 1024 / 1024 / 1024) \n+\nsum(sum(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance) / 1024 / 1024 / 1024) * $localStorageGBCost", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Storage Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Sum of compute, memory, storage and network costs.", + "format": "currencyUSD", 
+ "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 2 + }, + "hideTimeOverride": true, + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "# Compute\nsum(\n avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100) +\n avg(node_gpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n) +\n\n\n# Memory\nsum(\n avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n) +\n\n# Storage \n\nsum(avg(pv_hourly_cost) by (persistentvolume) * 730 * avg(kube_persistentvolume_capacity_bytes) by (persistentvolume) / 1024 / 1024 / 1024) \n+\nsum(sum(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance) / 1024 / 1024 / 1024) * $localStorageGBCost", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Total Cost", + "type": "singlestat", + "valueFontSize": "120%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Current CPU use from applications divided by allocatable CPUs", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 5 + }, + "height": "180px", + "hideTimeOverride": true, + "id": 13, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(\n sum(\n count(irate(container_cpu_usage_seconds_total{id=\"/\"}[10m])) by (instance)\n * on (instance) \n sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[10m])) by (instance)\n ) \n / \n (sum (kube_node_status_allocatable_cpu_cores))\n) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "30, 80", + "timeFrom": 
"", + "title": "CPU Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Current CPU reservation requests from applications vs allocatable CPU", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 5 + }, + "height": "180px", + "id": 15, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "SUM(kube_pod_container_resource_requests_cpu_cores) / SUM(kube_node_status_allocatable_cpu_cores) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "30, 80", + "title": "CPU Requests", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource": "default-kubecost", + "description": "Current RAM use vs RAM available", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 5 + }, + "height": "180px", + "hideTimeOverride": true, + "id": 17, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "SUM(container_memory_usage_bytes{namespace!=\"\"}) / SUM(kube_node_status_allocatable_memory_bytes) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 10 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" + } + ], + "thresholds": "30,80", + "timeFrom": "", + "title": "RAM Utilization", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": 
"current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource": "default-kubecost", + "description": "Current RAM requests vs RAM available", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 5 + }, + "height": "180px", + "id": 19, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\"})\n /\n sum(kube_node_status_allocatable_memory_bytes)\n) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "30,80", + "title": "RAM Requests", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "This gauge shows the current standard storage use, including cluster storage, vs storage available", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 5 + }, + "height": "180px", + "hideTimeOverride": true, + "id": 21, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (\n sum(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kubelet_volume_stats_used_bytes) by (persistentvolumeclaim, namespace) or up * 0\n + sum(container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"})\n) /\nsum (\n sum(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n + 
sum(container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"})\n) * 100", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "30, 80", + "timeFrom": "", + "title": "Storage Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "Monthly run rate of CPU + GPU costs", + "fill": 1, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 9 + }, + "id": 6, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost) by (node) * 730 +\n avg(node_gpu_hourly_cost) by (node) * 730\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "compute cost", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Compute Cost", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "Monthly run rate of memory costs", + "fill": 1, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 9 + }, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "memory cost", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Cost", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + 
"dashes": false, + "datasource": "default-kubecost", + "description": "Monthly run rate of attached disk + PV storage costs", + "fill": 1, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 9 + }, + "id": 10, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n avg(avg_over_time(pv_hourly_cost[$timeRange] offset 1m)) by (persistentvolume) * 730 \n * avg(avg_over_time(kube_persistentvolume_capacity_bytes[$timeRange] offset 1m)) by (persistentvolume) / 1024 / 1024 / 1024\n) +\nsum(avg(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance) / 1024 / 1024 / 1024) * $localStorageGBCost", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "storage cost", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Storage Cost", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "Sum of compute, memory, and storage costs", + "fill": 1, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 9 + }, + "id": 22, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "# Compute\nsum(\n avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100) +\n avg(node_gpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n) +\n\n\n# Memory\nsum(\n avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n) +\n\n# Storage \n\nsum(avg(pv_hourly_cost) by (persistentvolume) * 730 * avg(kube_persistentvolume_capacity_bytes) by (persistentvolume) / 1024 / 1024 / 1024) \n+\nsum(sum(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance) / 1024 / 1024 / 1024) * $localStorageGBCost", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "total cost", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Cost", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + 
"show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "default-kubecost", + "description": "Cost of by resource class of currently provisioned nodes", + "fontSize": "100%", + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 8, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 4, + "desc": false + }, + "styles": [ + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Time", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Compute Cost", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Cost", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "currencyUSD" + }, + { + "alias": "Mem Cost", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "currencyUSD" + }, + { + "alias": "Total", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "currencyUSD" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "GPU", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost or up * 0) by (node) * 730 * (1-$useDiscount/100)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730 * (1-$useDiscount/100)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + }, + { + 
"expr": "avg(node_gpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "D" + }, + { + "expr": "# CPU \navg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost or up * 0) by (node) * 730 * (1-$useDiscount/100) +\n# GPU\navg(node_gpu_hourly_cost) by (node) * 730 * (1-$useDiscount/100) +\n# Memory\navg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730 * (1-$useDiscount/100)\n", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "C" + } + ], + "title": "Cost by node", + "transform": "table", + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "Monthly run rate of attached disk + PV storage costs based on currently provisioned resources.", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 25, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n avg(kube_node_status_capacity_cpu_cores) by (node) * avg(node_cpu_hourly_cost) by (node) * 730 +\n avg(node_gpu_hourly_cost) by (node) * 730\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "cpu", + "refId": "B" + }, + { + "expr": "sum(\n avg(kube_node_status_capacity_memory_bytes) by (node) / 1024 / 1024 / 1024 * avg(node_ram_hourly_cost) by (node) * 730\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "memory", + "refId": "A" + }, + { + "expr": "sum(\n avg(avg_over_time(pv_hourly_cost[$timeRange] offset 1m)) by (persistentvolume) * 730 \n * avg(avg_over_time(kube_persistentvolume_capacity_bytes[$timeRange] offset 1m)) by (persistentvolume) / 1024 / 1024 / 1024\n) +\nsum(avg(container_fs_limit_bytes{device!=\"tmpfs\", id=\"/\"}) by (instance) / 1024 / 1024 / 1024) * $localStorageGBCost", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "storage", + "refId": "C" + }, + { + "expr": "SUM(rate(node_network_transmit_bytes_total{device=\"eth0\"}[60m]) / 1024 / 1024 / 1024 ) * (60 * 60 * 24 * 30) * $percentEgress * $egressCost ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "network", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Cost by Resource", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "cost", + "utilization", + "metrics" + ], + "templating": { + "list": [ + { + "auto": true, + "auto_count": 1, + "auto_min": "1m", + "current": { + "text": "auto", + "value": 
"$__auto_interval_timeRange" + }, + "hide": 2, + "label": null, + "name": "timeRange", + "options": [ + { + "selected": true, + "text": "auto", + "value": "$__auto_interval_timeRange" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + }, + { + "selected": false, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "14d", + "value": "14d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + }, + { + "selected": false, + "text": "90d", + "value": "90d" + } + ], + "query": "1h,6h,12h,1d,7d,14d,30d,90d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + }, + { + "current": { + "text": ".04", + "value": ".04" + }, + "hide": 2, + "label": "Cost per Gb hour for attached disks", + "name": "localStorageGBCost", + "options": [ + { + "selected": true, + "text": ".04", + "value": ".04" + } + ], + "query": ".04", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "tags": [], + "text": "0", + "value": "0" + }, + "hide": 0, + "label": "Sustained Use Discount %", + "name": "useDiscount", + "options": [ + { + "selected": true, + "text": "0", + "value": "0" + } + ], + "query": "0", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "text": ".1", + "value": ".1" + }, + "hide": 2, + "label": null, + "name": "percentEgress", + "options": [ + { + "selected": true, + "text": ".1", + "value": ".1" + } + ], + "query": ".1", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "text": ".12", + "value": ".12" + }, + "hide": 2, + "label": null, + "name": "egressCost", + "options": [ + { + "selected": true, + "text": ".12", + "value": ".12" + } + ], + "query": ".12", + "skipUrlSync": false, + "type": "constant" + } + ] + }, + "time": { + "from": "now-7d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubecost cluster metrics", + "uid": "JOUdHGZZz", + "version": 20 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/cluster-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/cluster-utilization.json new file mode 100644 index 000000000..0c3acfc76 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/cluster-utilization.json @@ -0,0 +1,3473 @@ +{ + "annotations":{ + "list":[ + { + "builtIn":1, + "datasource":"-- Grafana --", + "enable":true, + "hide":true, + "iconColor":"rgba(0, 211, 255, 1)", + "name":"Annotations & Alerts", + "type":"dashboard" + } + ] + }, + "description":"A dashboard to help manage Kubernetes cluster costs and resources", + "editable":true, + "gnetId":6873, + "graphTooltip":0, + "id":4, + "iteration":1556759633456, + "links":[ + + ], + "panels":[ + { + "content":"This dashboard shows monthly cost estimates for the cluster, based on **current** CPU, RAM and storage provisioned.", + "gridPos":{ + "h":2, + "w":24, + "x":0, + "y":0 + }, + "id":86, + "links":[ + + ], + "mode":"markdown", + "title":"", + "transparent":true, + "type":"text" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "decimals":2, + 
"format":"currencyUSD", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":6, + "x":0, + "y":2 + }, + "hideTimeOverride":true, + "id":75, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"label_cloud_google_com_gke_preemptible", + "targets":[ + { + "expr":"sum(\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) * $costpcpu\n )\n or\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) * ($costcpu - ($costcpu / 100 * $costDiscount))\n )\n) ", + "format":"time_series", + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":" {{ node }}", + "refId":"A" + } + ], + "thresholds":"", + "timeFrom":"15m", + "timeShift":null, + "title":"CPU Cost", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "decimals":2, + "format":"currencyUSD", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":6, + "x":6, + "y":2 + }, + "hideTimeOverride":true, + "id":77, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"label_cloud_google_com_gke_preemptible", + "targets":[ + { + "expr":"sum(\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) /1024/1024/1024 * $costpram\n )\n or\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) /1024/1024/1024 * ($costram - ($costram / 100 * $costDiscount))\n)\n) ", + "format":"time_series", + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":" {{ node }}", + "refId":"A" + } + ], + "thresholds":"", + "timeFrom":"15m", + "timeShift":null, + 
"title":"RAM Cost", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "decimals":2, + "format":"currencyUSD", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":6, + "x":12, + "y":2 + }, + "hideTimeOverride":true, + "id":78, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"label_cloud_google_com_gke_preemptible", + "targets":[ + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageSSD\n\n+\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageStandard\n\n+ \n\nsum(container_fs_limit_bytes{id=\"/\"}) / 1024 / 1024 / 1024 * 1.03 * $costStorageStandard", + "format":"time_series", + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":" {{ node }}", + "refId":"A" + } + ], + "thresholds":"", + "timeFrom":"15m", + "timeShift":null, + "title":"Storage Cost (Cluster and PVC)", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"Represents a near worst-case approximation of network costs.", + "format":"currencyUSD", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":6, + "x":18, + "y":2 + }, + "hideTimeOverride":true, + "id":129, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"label_cloud_google_com_gke_preemptible", + 
"targets":[ + { + "expr":"SUM(rate(node_network_transmit_bytes_total{device=\"eth0\"}[60m]) / 1024 / 1024 / 1024 ) * (60 * 60 * 24 * 30) * $costEgress", + "format":"time_series", + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":" {{ node }}", + "refId":"A" + } + ], + "thresholds":"", + "timeFrom":"15m", + "timeShift":null, + "title":"Network Egress Cost", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"Current CPU use from applications divided by allocatable CPUs", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":0, + "y":6 + }, + "height":"180px", + "hideTimeOverride":true, + "id":82, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"(\n sum(\n count(irate(container_cpu_usage_seconds_total{id=\"/\"}[10m])) by (instance)\n * on (instance) \n sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[10m])) by (instance)\n ) \n / \n (sum (kube_node_status_allocatable_cpu_cores))\n) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + } + ], + "thresholds":"30, 80", + "timeFrom":"", + "title":"CPU Utilization", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"Current CPU reservation requests from applications vs allocatable CPU", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":3, + "y":6 + }, + "height":"180px", + "id":91, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"SUM(kube_pod_container_resource_requests_cpu_cores) / 
SUM(kube_node_status_allocatable_cpu_cores) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + } + ], + "thresholds":"30, 80", + "title":"CPU Requests", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "description":"Current RAM use vs RAM available", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":6, + "y":6 + }, + "height":"180px", + "hideTimeOverride":true, + "id":80, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"SUM(container_memory_usage_bytes{namespace!=\"\"}) / SUM(kube_node_status_allocatable_memory_bytes) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + }, + { + "expr":"", + "format":"time_series", + "intervalFactor":1, + "refId":"B" + } + ], + "thresholds":"30,80", + "timeFrom":"", + "title":"RAM Utilization", + "transparent":false, + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "description":"Current RAM requests vs RAM available", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":9, + "y":6 + }, + "height":"180px", + "id":92, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\"})\n /\n sum(kube_node_status_allocatable_memory_bytes)\n) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + } + ], + "thresholds":"30,80", + "title":"RAM Requests", + "transparent":false, + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + 
"text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"This gauge shows the current standard storage use, including cluster storage, vs storage available", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":12, + "y":6 + }, + "height":"180px", + "hideTimeOverride":true, + "id":95, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kubelet_volume_stats_used_bytes) by (persistentvolumeclaim, namespace) or up * 0\n + sum(container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"})\n) /\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n + sum(container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"})\n) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + } + ], + "thresholds":"30, 80", + "timeFrom":"", + "title":"Storage Utilization", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":true, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"This gauge shows the current SSD use vs SSD available", + "editable":true, + "error":false, + "format":"percent", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":3, + "x":15, + "y":6 + }, + "height":"180px", + "hideTimeOverride":true, + "id":96, + "interval":null, + "isNew":true, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + 
"expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kubelet_volume_stats_used_bytes) by (persistentvolumeclaim, namespace)\n) /\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace)\n) * 100", + "format":"time_series", + "interval":"", + "intervalFactor":1, + "refId":"A", + "step":10 + } + ], + "thresholds":"30, 80", + "timeFrom":"", + "title":"SSD Utilization", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "decimals":2, + "description":"Expected monthly cost given current CPU, memory storage, and network resource consumption", + "format":"currencyUSD", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":6, + "x":18, + "y":6 + }, + "hideTimeOverride":true, + "id":93, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"label_cloud_google_com_gke_preemptible", + "targets":[ + { + "expr":"# CPU\nsum(\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) * $costpcpu\n )\n or\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) * ($costcpu - ($costcpu / 100 * $costDiscount))\n )\n) \n\n+ \n\n# Storage\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageSSD\n\n+\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageStandard\n\n+ \n\nsum(container_fs_limit_bytes{id=\"/\"}) / 1024 / 1024 / 1024 * 1.03 * $costStorageStandard \n\n+\n\n# END STORAGE\n# RAM \nsum(\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left 
(label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) /1024/1024/1024 * $costpram\n )\n or\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) /1024/1024/1024 * ($costram - ($costram / 100 * $costDiscount))\n)\n)\n\n+\n\n#Network \nSUM(rate(node_network_transmit_bytes_total{device=\"eth0\"}[60m]) / 1024 / 1024 / 1024 ) * (60 * 60 * 24 * 30) * $costEgress", + "format":"time_series", + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":" {{ node }}", + "refId":"A" + } + ], + "thresholds":"", + "timeFrom":"15m", + "timeShift":null, + "title":"Total Monthly Cost", + "type":"singlestat", + "valueFontSize":"120%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "description":"Expected monthly CPU, memory and storage costs given provisioned resources", + "fill":1, + "gridPos":{ + "h":8, + "w":12, + "x":0, + "y":10 + }, + "id":120, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":false, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"# CPU\nsum(\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) * $costpcpu\n )\n or\n (\n (\n sum(kube_node_status_capacity_cpu_cores) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) * ($costcpu - ($costcpu / 100 * $costDiscount))\n )\n) \n\n+ \n\n# Storage\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageSSD\n\n+\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) or up * 0\n) / 1024 / 1024 /1024 * $costStorageStandard\n\n+ \n\nsum(container_fs_limit_bytes{id=\"/\"}) / 1024 / 1024 / 1024 * 1.03 * $costStorageStandard \n\n+\n\n# END STORAGE\n# RAM \nsum(\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible=\"true\"}) by (node)\n ) /1024/1024/1024 * $costpram\n )\n or\n (\n (\n sum(kube_node_status_capacity_memory_bytes) by (node)\n * on (node) group_left (label_cloud_google_com_gke_preemptible)\n avg(kube_node_labels{label_cloud_google_com_gke_preemptible!=\"true\"}) by (node)\n ) /1024/1024/1024 * 
($costram - ($costram / 100 * $costDiscount))\n)\n) \n\n+\n\n#Network \nSUM(rate(node_network_transmit_bytes_total{device=\"eth0\"}[60m]) / 1024 / 1024 / 1024 ) * (60 * 60 * 24 * 30) * $costEgress", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"cluster cost", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Total monthly cost", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"currencyUSD", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "description":"Resources allocated to namespace based on container requests", + "fontSize":"100%", + "gridPos":{ + "h":8, + "w":12, + "x":12, + "y":10 + }, + "hideTimeOverride":false, + "id":73, + "links":[ + + ], + "pageSize":10, + "repeat":null, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":7, + "desc":true + }, + "styles":[ + { + "alias":"Namespace", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":true, + "linkTooltip":"View namespace cost metrics", + "linkUrl":"d/at-cost-analysis-namespace2/namespace-cost-metrics?&var-namespace=$__cell", + "pattern":"namespace", + "thresholds":[ + "30", + "80" + ], + "type":"string", + "unit":"currencyUSD" + }, + { + "alias":"RAM", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "pattern":"Value #B", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"CPU", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"PV Storage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"Total", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #D", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"CPU Utilization", + "colorMode":"value", + "colors":[ + "#bf1b00", + "rgba(50, 172, 45, 0.97)", + "#ef843c" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value 
#E", + "thresholds":[ + "30", + "80" + ], + "type":"number", + "unit":"percent" + }, + { + "alias":"RAM Utilization", + "colorMode":"value", + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#ef843c" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #F", + "thresholds":[ + "30", + "80" + ], + "type":"number", + "unit":"percent" + } + ], + "targets":[ + { + "expr":"(\n sum(kube_pod_container_resource_requests_cpu_cores{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible!=\"true\"}*($costcpu - ($costcpu / 100 * $costDiscount))) by(namespace)\n or\n count(\n count(container_spec_cpu_shares{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n(\n sum(kube_pod_container_resource_requests_cpu_cores{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible=\"true\"}*$costpcpu) by(namespace)\n or\n count(\n count(container_spec_cpu_shares{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"{{ namespace }}", + "refId":"A" + }, + { + "expr":"(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible!=\"true\"} / 1024 / 1024 / 1024*($costram- ($costram / 100 * $costDiscount))) by (namespace) \n or\n count(\n count(container_spec_memory_limit_bytes{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible=\"true\"} / 1024 / 1024 / 1024 * $costpram ) by (namespace) \n or\n count(\n count(container_spec_memory_limit_bytes{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)", + "format":"table", + "instant":true, + "intervalFactor":1, + "legendFormat":"{{ namespace }}", + "refId":"B" + }, + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) \n) by (namespace) / 1024 / 1024 /1024 * $costStorageSSD \n\nor\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) \n) by (namespace) / 1024 / 1024 /1024 * $costStorageStandard", + "format":"table", + "instant":true, + "intervalFactor":1, + "legendFormat":"{{ namespace }}", + "refId":"C" + }, + { + "expr":"# CPU \n(\n sum(kube_pod_container_resource_requests_cpu_cores{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible!=\"true\"}*($costcpu - ($costcpu / 100 * $costDiscount))) by(namespace)\n or\n count(\n count(container_spec_cpu_shares{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n(\n sum(kube_pod_container_resource_requests_cpu_cores{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible=\"true\"}*$costpcpu) by(namespace)\n or\n count(\n count(container_spec_cpu_shares{namespace!=\"\",namespace!=\"kube-system\"}) 
by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n#END CPU \n# Memory \n\n(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible!=\"true\"} / 1024 / 1024 / 1024*($costram- ($costram / 100 * $costDiscount))) by (namespace) \n or\n count(\n count(container_spec_memory_limit_bytes{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n(\n sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\",namespace!=\"kube-system\",cloud_google_com_gke_preemptible=\"true\"} / 1024 / 1024 / 1024 * $costpram ) by (namespace) \n or\n count(\n count(container_spec_memory_limit_bytes{namespace!=\"\",namespace!=\"kube-system\"}) by(namespace)\n ) by(namespace) -1\n)\n\n+\n\n# PV storage\n\n(\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) \n) by (namespace) / 1024 / 1024 /1024 * $costStorageSSD \n\nor\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim, namespace) \n) by (namespace) / 1024 / 1024 /1024 * $costStorageStandard \n)", + "format":"table", + "instant":true, + "intervalFactor":1, + "legendFormat":"Total", + "refId":"D" + } + ], + "timeFrom":"", + "timeShift":null, + "title":"Namespace cost allocation", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "collapsed":false, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":18 + }, + "id":108, + "panels":[ + + ], + "title":"CPU Metrics", + "type":"row" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":8, + "w":24, + "x":0, + "y":19 + }, + "id":116, + "legend":{ + "alignAsTable":false, + "avg":false, + "current":false, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"SUM(kube_node_status_capacity_cpu_cores)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"capacity", + "refId":"A" + }, + { + "expr":"SUM(kube_pod_container_resource_requests_cpu_cores)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"requests", + "refId":"C" + }, + { + "expr":"SUM(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m]))", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"usage", + "refId":"B" + }, + { + "expr":"SUM(kube_pod_container_resource_limits_cpu_cores) ", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"limits", + "refId":"D" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cluster CPUs", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":1, + 
"format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":8, + "w":24, + "x":0, + "y":27 + }, + "id":130, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"avg(irate(node_cpu_seconds_total{mode!=\"idle\"}[5m])) by (mode) * 100", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"{{mode}}", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"CPU Mode", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"percent", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "description":"This table shows the comparison of CPU requests and usage by namespace", + "fontSize":"100%", + "gridPos":{ + "h":10, + "w":12, + "x":0, + "y":35 + }, + "hideTimeOverride":true, + "id":104, + "links":[ + + ], + "pageSize":8, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":1, + "desc":true + }, + "styles":[ + { + "alias":"CPU Requests", + "colorMode":null, + "colors":[ + "#fceaca", + "#fce2de", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + "" + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Node", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"node", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"CPU Requests", + "colorMode":"value", + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#cffaff" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + "" + ], + "type":"number", + "unit":"short" + }, + { + "alias":"24h CPU Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + "30" + ], + "type":"number", + 
"unit":"none" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":true, + "linkTooltip":"View namespace cost metrics", + "linkUrl":"d/at-cost-analysis-namespace2/namespace-cost-metrics?&var-namespace=$__cell", + "mappingType":1, + "pattern":"namespace", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + } + ], + "targets":[ + { + "expr":"sum(kube_pod_container_resource_requests_cpu_cores{namespace!=\"\"}) by (namespace) ", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"", + "refId":"A" + }, + { + "expr":"sum (rate (container_cpu_usage_seconds_total{image!=\"\",namespace!=\"\"}[24h])) by (namespace)", + "format":"table", + "instant":true, + "intervalFactor":1, + "legendFormat":"{{ namespace }}", + "refId":"C" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"CPU request utilization by namespace", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "description":"This table shows the comparison of application CPU usage vs the capacity of the node (measured over last 60 minutes)", + "fontSize":"100%", + "gridPos":{ + "h":10, + "w":12, + "x":12, + "y":35 + }, + "hideTimeOverride":true, + "id":90, + "links":[ + + ], + "pageSize":8, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":2, + "desc":true + }, + "styles":[ + { + "alias":"CPU Request Utilization", + "colorMode":"value", + "colors":[ + "#ef843c", + "rgba(50, 172, 45, 0.97)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + ".30", + " .80" + ], + "type":"number", + "unit":"percentunit" + }, + { + "alias":"Node", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"node", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"CPU Utilization", + "colorMode":"value", + "colors":[ + "#ef843c", + "rgba(50, 172, 45, 0.97)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + ".20", + " .80" + ], + "type":"number", + "unit":"percentunit" + }, + { + "alias":"24h Utilization ", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value", + "thresholds":[ + + ], + "type":"number", + "unit":"percentunit" + } + ], + "targets":[ + { + "expr":"SUM(\nSUM(rate(container_cpu_usage_seconds_total[24h])) by (pod_name)\n* on (pod_name) group_left (node) \nlabel_replace(\n avg(kube_pod_info{}),\n \"pod_name\", \n \"$1\", \n \"pod\", \n \"(.+)\"\n)\n) by (node) \n/ \nsum(kube_node_status_capacity_cpu_cores) by (node)", + "format":"table", + "instant":true, + 
"intervalFactor":1, + "refId":"B" + }, + { + "expr":"sum(kube_pod_container_resource_requests_cpu_cores) by (node) / sum(kube_node_status_capacity_cpu_cores) by (node)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"A" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Cluster cost & utilization by node", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "collapsed":false, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":45 + }, + "id":113, + "panels":[ + + ], + "title":"Memory Metrics", + "type":"row" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":8, + "w":24, + "x":0, + "y":46 + }, + "id":117, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":false, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"SUM(kube_node_status_capacity_memory_bytes / 1024 / 1024 / 1024)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"capacity", + "refId":"A" + }, + { + "expr":"SUM(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\"} / 1024 / 1024 / 1024)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"requests", + "refId":"C" + }, + { + "expr":"SUM(container_memory_usage_bytes{image!=\"\"} / 1024 / 1024 / 1024)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"usage", + "refId":"B" + }, + { + "expr":"SUM(kube_pod_container_resource_limits_memory_bytes {namespace!=\"\"} / 1024 / 1024 / 1024)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"limits", + "refId":"D" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cluster memory (GB)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"decgbytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":8, + "w":24, + "x":0, + "y":54 + }, + "id":131, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":false, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"1 - sum(node_memory_MemAvailable_bytes) by (node) / sum(node_memory_MemTotal_bytes) by (node)", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"usage", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Cluster Memory Utilization", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + 
"show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percentunit", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "description":"Comparison of memory requests and current usage by namespace", + "fontSize":"100%", + "gridPos":{ + "h":10, + "w":12, + "x":0, + "y":62 + }, + "hideTimeOverride":true, + "id":109, + "links":[ + + ], + "pageSize":7, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":1, + "desc":true + }, + "styles":[ + { + "alias":"Mem Requests (GB)", + "colorMode":null, + "colors":[ + "#fceaca", + "#fce2de", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + "" + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Node", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"node", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"CPU Requests", + "colorMode":"value", + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#cffaff" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + "" + ], + "type":"number", + "unit":"short" + }, + { + "alias":"24h Mem Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "#508642", + "#e5ac0e" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + ".30", + ".75" + ], + "type":"number", + "unit":"none" + }, + { + "alias":"Namespace", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":true, + "linkTooltip":"View namespace cost metrics", + "linkUrl":"d/at-cost-analysis-namespace2/namespace-cost-metrics?&var-namespace=$__cell", + "mappingType":1, + "pattern":"namespace", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + } + ], + "targets":[ + { + "expr":"sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\"} / 1024 / 1024 / 1024) by (namespace) ", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"", + "refId":"A" + }, + { + "expr":"SUM(container_memory_usage_bytes{image!=\"\",namespace!=\"\"} / 1024 / 1024 / 1024) by (namespace)", + "format":"table", + "instant":true, + "intervalFactor":1, + "legendFormat":"", + "refId":"C" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Memory requests & utilization by namespace", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "description":"Container RAM usage vs node capacity", + 
"fontSize":"100%", + "gridPos":{ + "h":10, + "w":12, + "x":12, + "y":62 + }, + "hideTimeOverride":true, + "id":114, + "links":[ + + ], + "pageSize":8, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":1, + "desc":true + }, + "styles":[ + { + "alias":"RAM Requests", + "colorMode":"value", + "colors":[ + "#ef843c", + "rgba(50, 172, 45, 0.97)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + "30", + " 80" + ], + "type":"number", + "unit":"percentunit" + }, + { + "alias":"Node", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"node", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"RAM Usage", + "colorMode":"value", + "colors":[ + "#ef843c", + "rgba(50, 172, 45, 0.97)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + "25", + " 80" + ], + "type":"number", + "unit":"percent" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + } + ], + "targets":[ + { + "expr":"SUM(label_replace(container_memory_usage_bytes{namespace!=\"\"}, \"node\", \"$1\", \"instance\",\"(.+)\")) by (node) * 100\n/\nSUM(kube_node_status_capacity_memory_bytes) by (node)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"B" + }, + { + "expr":"sum(kube_pod_container_resource_requests_memory_bytes{namespace!=\"\"}) by (node) / SUM(kube_node_status_capacity_memory_bytes) by (node)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"A" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Node utilization of allocatable RAM", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "collapsed":false, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":72 + }, + "id":101, + "panels":[ + + ], + "title":"Storage Metrics", + "type":"row" + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "fontSize":"100%", + "gridPos":{ + "h":9, + "w":12, + "x":0, + "y":73 + }, + "hideTimeOverride":true, + "id":97, + "links":[ + + ], + "pageSize":8, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":4, + "desc":true + }, + "styles":[ + { + "alias":"Node", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"instance", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"PVC Name", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + 
"pattern":"persistentvolumeclaim", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Storage Class", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"storageclass", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Cost", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"Cost", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"Size (GB)", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + + ], + "type":"number", + "unit":"percentunit" + } + ], + "targets":[ + { + "expr":"SUM(container_fs_limit_bytes{id=\"/\"}) by (instance) / 1024 / 1024 / 1024 * 1.03", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"B" + }, + { + "expr":"SUM(container_fs_limit_bytes{id=\"/\"}) by (instance) / 1024 / 1024 / 1024 * 1.03 * $costStorageStandard\n", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"{{ persistentvolumeclaim }}", + "refId":"A" + }, + { + "expr":"sum(container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"} / container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\"}) by (instance) \n", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"C" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Local Storage", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":9, + "w":12, + "x":12, + "y":73 + }, + "id":128, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"SUM(container_fs_usage_bytes{id=\"/\"}) / SUM(container_fs_limit_bytes{id=\"/\"})", + 
"format":"time_series", + "intervalFactor":1, + "legendFormat":"reads", + "refId":"D" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Local storage utilization", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":"IOPS", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "fontSize":"100%", + "gridPos":{ + "h":10, + "w":12, + "x":0, + "y":82 + }, + "hideTimeOverride":true, + "id":94, + "links":[ + + ], + "pageSize":10, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":2, + "desc":true + }, + "styles":[ + { + "alias":"Namespace", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":true, + "linkTooltip":"View namespace cost metrics", + "linkUrl":"d/at-cost-analysis-namespace2/namespace-cost-metrics?&var-namespace=$__cell", + "mappingType":1, + "pattern":"namespace", + "thresholds":[ + + ], + "type":"string", + "unit":"short" + }, + { + "alias":"PVC Name", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"persistentvolumeclaim", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Storage Class", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"storageclass", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Cost", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"Cost", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + + ], + "type":"number", + "unit":"percentunit" + }, + { + "alias":"Size (GB)", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + 
"dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + } + ], + "targets":[ + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n * on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim,storageclass) / 1024 / 1024 /1024\n\nor\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n * on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim,storageclass) / 1024 / 1024 /1024\n\n\n", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"C" + }, + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n * on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim,storageclass) / 1024 / 1024 /1024 * $costStorageSSD\n\nor\n\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n * on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim,storageclass) / 1024 / 1024 /1024 * $costStorageStandard\n", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"{{ persistentvolumeclaim }}", + "refId":"A" + }, + { + "expr":"sum(kubelet_volume_stats_used_bytes) by (persistentvolumeclaim, namespace) \n/\nsum (\n sum(kube_persistentvolumeclaim_info{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace, storageclass)\n * on (persistentvolumeclaim, namespace) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{storageclass!~\".*ssd.*\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"B" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Persistent Volume Claims", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":10, + "w":12, + "x":12, + "y":82 + }, + "id":132, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"SUM(rate(node_disk_reads_completed_total[10m])) or SUM(rate(node_disk_reads_completed[10m]))\n", + "format":"time_series", + "intervalFactor":1, + 
"legendFormat":"reads", + "refId":"D" + }, + { + "expr":"SUM(rate(node_disk_writes_completed_total[10m])) or SUM(rate(node_disk_writes_completed[10m]))", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"writes", + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Disk IOPS", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"none", + "label":"IOPS", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":9, + "w":24, + "x":0, + "y":92 + }, + "id":122, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"SUM( kubelet_volume_stats_inodes_used / kubelet_volume_stats_inodes) by (persistentvolumeclaim) * 100", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"", + "refId":"D" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Inode usage", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"percent", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "collapsed":false, + "gridPos":{ + "h":1, + "w":24, + "x":0, + "y":101 + }, + "id":127, + "panels":[ + + ], + "title":"Network", + "type":"row" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "fill":1, + "gridPos":{ + "h":9, + "w":24, + "x":0, + "y":102 + }, + "id":123, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (node_network_transmit_bytes_total{}[60m]))\n", + "format":"time_series", + "intervalFactor":1, + "legendFormat":"node_out", + "refId":"B" + }, + { + "expr":"SUM ( rate(node_network_transmit_bytes_total{device=\"eth0\"}[60m]))", + "format":"time_series", + "instant":false, + "intervalFactor":1, + "legendFormat":"eth0 out", + "refId":"C" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Node network transmit", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + 
"show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"decbytes", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "refresh":"15m", + "schemaVersion":16, + "style":"dark", + "tags":[ + "cost", + "utilization", + "metrics" + ], + "templating":{ + "list":[ + { + "current":{ + "text":"23.076", + "value":"23.076" + }, + "hide":0, + "label":"CPU", + "name":"costcpu", + "options":[ + { + "text":"23.076", + "value":"23.076" + } + ], + "query":"23.076", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"5.10", + "value":"5.10" + }, + "hide":0, + "label":"PE CPU", + "name":"costpcpu", + "options":[ + { + "text":"5.10", + "value":"5.10" + } + ], + "query":"5.10", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"3.25", + "value":"3.25" + }, + "hide":0, + "label":"RAM", + "name":"costram", + "options":[ + { + "text":"3.25", + "value":"3.25" + } + ], + "query":"3.25", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"0.6862", + "value":"0.6862" + }, + "hide":0, + "label":"PE RAM", + "name":"costpram", + "options":[ + { + "text":"0.6862", + "value":"0.6862" + } + ], + "query":"0.6862", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"0.040", + "value":"0.040" + }, + "hide":0, + "label":"Storage", + "name":"costStorageStandard", + "options":[ + { + "text":"0.040", + "value":"0.040" + } + ], + "query":"0.040", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":".17", + "value":".17" + }, + "hide":0, + "label":"SSD", + "name":"costStorageSSD", + "options":[ + { + "text":".17", + "value":".17" + } + ], + "query":".17", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":".12", + "value":".12" + }, + "hide":0, + "label":"Egress", + "name":"costEgress", + "options":[ + { + "selected":true, + "text":".12", + "value":".12" + } + ], + "query":".12", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"30", + "value":"30" + }, + "hide":0, + "label":"Discount", + "name":"costDiscount", + "options":[ + { + "text":"30", + "value":"30" + } + ], + "query":"30", + "skipUrlSync":false, + "type":"constant" + } + ] + }, + "time":{ + "from":"now-24h", + "to":"now" + }, + "timepicker":{ + "hidden":false, + "refresh_intervals":[ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options":[ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone":"browser", + "title":"Cluster cost & utilization metrics", + "uid":"cluster-costs", + "version":1 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/deployment-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/deployment-utilization.json new file mode 100644 index 000000000..ff5526c15 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/deployment-utilization.json @@ -0,0 +1,1366 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Monitors Kubernetes deployments in cluster using Prometheus and kube-state-metrics. 
Shows resource utilization of deployments, daemonsets, and statefulsets.", + "editable": true, + "gnetId": 8588, + "graphTooltip": 0, + "id": 2, + "iteration": 1586200623748, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 0 + }, + "height": "180px", + "id": 1, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (container_memory_working_set_bytes{container!=\"\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\", kubernetes_io_hostname=~\"^$Node$\", pod_name!=\"\"}) / sum (kube_node_status_allocatable_memory_bytes{node=~\"^$Node.*$\"}) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 900 + } + ], + "thresholds": "65, 90", + "title": "Deployment memory usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 0 + }, + "height": "180px", + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\", kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 900 + } + ], + "thresholds": "65, 90", + "title": "Deployment CPU usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": 
"current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 0 + }, + "height": "180px", + "id": 3, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(((sum(kube_deployment_status_replicas{deployment=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_statefulset_replicas{statefulset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_daemonset_status_desired_number_scheduled{daemonset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0))) - ((sum(kube_deployment_status_replicas_available{deployment=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_statefulset_status_replicas{statefulset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_daemonset_status_number_ready{daemonset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)))) / ((sum(kube_deployment_status_replicas{deployment=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_statefulset_replicas{statefulset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_daemonset_status_desired_number_scheduled{daemonset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0))) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "1,30", + "title": "Unavailable Replicas", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 5 + }, + "height": "100px", + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": 
"sum(container_memory_working_set_bytes{container!=\"\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\", kubernetes_io_hostname=~\"^$Node$\", pod_name!=\"\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 5 + }, + "height": "100px", + "id": 5, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (kube_node_status_allocatable_memory_bytes{node=~\"^$Node.*$\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 5 + }, + "height": "100px", + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (rate (container_cpu_usage_seconds_total{pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\", kubernetes_io_hostname=~\"^$Node$\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + 
"rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 5 + }, + "height": "100px", + "id": 7, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 5 + }, + "height": "100px", + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(kube_deployment_status_replicas_available{deployment=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_statefulset_status_replicas{statefulset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_daemonset_status_number_ready{daemonset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0))", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Available (cluster)", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "default-kubecost", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 5 + }, + "height": 
"100px", + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(kube_deployment_status_replicas{deployment=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_statefulset_replicas{statefulset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0)) + (sum(kube_daemonset_status_desired_number_scheduled{daemonset=~\".*$Deployment$Statefulset$Daemonset\"}) or vector(0))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ $Daemonset }}", + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Total (cluster)", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 8 + }, + "height": "", + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/avlbl.*/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate (container_cpu_usage_seconds_total{container!=\"\",image!=\"\",name=~\"^k8s_.*\",io_kubernetes_container_name!=\"POD\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\",kubernetes_io_hostname=~\"^$Node$\"}[5m])) by (pod_name,kubernetes_io_hostname)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "usage: {{ kubernetes_io_hostname }} | {{ pod_name }} ", + "metric": "container_cpu", + "refId": "A", + "step": 60 + }, + { + "expr": "sum (kube_pod_container_resource_requests_cpu_cores{pod=~\"^$Deployment$Statefulset$Daemonset.*$\",node=~\"^$Node$\"}) by (pod,node)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "rqst: {{ node }} | {{ pod }}", + "refId": "B", + "step": 120 + }, + { + "expr": "sum ((kube_node_status_allocatable_cpu_cores{node=~\"^$Node$\"})) by (node)", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "avlbl: {{ node }}", + "refId": "C", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage & requests", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + 
"yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/^avlbl.*$/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max (container_memory_working_set_bytes{container!=\"POD\",container!=\"\",id!=\"/\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name,kubernetes_io_hostname)", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "usage: {{kubernetes_io_hostname }} | {{ pod_name }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 60 + }, + { + "expr": "sum ((kube_pod_container_resource_requests_memory_bytes{pod=~\"^$Deployment$Statefulset$Daemonset.*$\",node=~\"^$Node$\"})) by (pod,node)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "rqst: {{ node }} | {{ pod }}", + "refId": "B", + "step": 120 + }, + { + "expr": "sum ((kube_node_status_allocatable_memory_bytes{node=~\"^$Node$\"})) by (node)", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "avlbl: {{ node }}", + "refId": "C", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory usage & requests", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "100 * (kubelet_volume_stats_used_bytes{kubernetes_io_hostname=~\"^$Node$\", 
persistentvolumeclaim=~\".*$Deployment$Statefulset$Daemonset.*$\"} / kubelet_volume_stats_capacity_bytes{kubernetes_io_hostname=~\"^$Node$\", persistentvolumeclaim=~\".*$Deployment$Statefulset$Daemonset.*$\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ persistentvolumeclaim }} | {{ kubernetes_io_hostname }}", + "refId": "A", + "step": 120 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 41 + }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name, kubernetes_io_hostname)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ kubernetes_io_hostname }} | {{ pod_name }}", + "metric": "network", + "refId": "A", + "step": 60 + }, + { + "expr": "- sum( rate (container_network_transmit_bytes_total{id!=\"/\",pod_name=~\"^$Deployment$Statefulset$Daemonset.*$\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name, kubernetes_io_hostname)", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ kubernetes_io_hostname }} | {{ pod_name }}", + "metric": "network", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "All processes network I/O", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "kubernetes", + "deployment" + ], + "templating": { + "list": [ + { + "allValue": "()", + "current": { + "selected": false, + "tags": [], + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": 
false, + "name": "Deployment", + "options": [], + "query": "label_values(deployment)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "()", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Statefulset", + "options": [], + "query": "label_values(statefulset)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "()", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Daemonset", + "options": [], + "query": "label_values(daemonset)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Node", + "options": [], + "query": "label_values(kubernetes_io_hostname)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Deployment/Statefulset/Daemonset utilization metrics", + "uid": "deployment-metrics", + "version": 1 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/label-cost-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/label-cost-utilization.json new file mode 100644 index 000000000..46d508aa0 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/label-cost-utilization.json @@ -0,0 +1,1193 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 7, + "iteration": 1586214000479, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Monthly projected CPU cost given last 10m", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 0, + "y": 0 + }, + "hideTimeOverride": true, + "id": 15, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": 
"null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(\n avg(container_cpu_allocation) by (pod,node)\n\n * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[10m])) by (node)\n\n * on (pod) group_left()\n label_replace(\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod),\n \"pod_name\",\n \"$1\", \n \"pod\", \n \"(.+)\"\n )\n) * 730", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "CPU Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Based on CPU usage over last 24 hours", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 6, + "y": 0 + }, + "hideTimeOverride": true, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(\n avg(container_memory_allocation_bytes) by (pod,node) / 1024 / 1024 / 1024\n\n * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[10m])) by (node)\n\n * on (pod) group_left()\n label_replace(\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod),\n \"pod_name\",\n \"$1\", \n \"pod\", \n \"(.+)\"\n )\n) * 730", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Memory Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 12, + "y": 0 + }, + "hideTimeOverride": true, + "id": 21, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "sum(\n sum(kube_persistentvolumeclaim_info{storageclass!=\".*ssd.*\"}) by (persistentvolumeclaim, storageclass)\n * on (persistentvolumeclaim) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim)\n * on (persistentvolumeclaim) group_left(label_app)\n max(kube_persistentvolumeclaim_labels{label_$label=~\"$label_value\"}) by (persistentvolumeclaim) or up * 0\n) / 1024 / 1024 /1024 * .04 \n\n+\n\nsum(\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, storageclass)\n * on (persistentvolumeclaim) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim)\n * on (persistentvolumeclaim) group_left(label_app)\n max(kube_persistentvolumeclaim_labels{label_$label=~\"$label_value\"}) by (persistentvolumeclaim) or up * 0\n) / 1024 / 1024 /1024 * .17 \n", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Storage Cost", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "description": "Cost of memory + CPU usage", + "format": "currencyUSD", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 18, + "y": 0 + }, + "hideTimeOverride": true, + "id": 20, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "label_cloud_google_com_gke_preemptible", + "targets": [ + { + "expr": "# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CPU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nsum(\n avg(container_cpu_allocation) by (pod,node)\n\n * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[10m])) by (node)\n\n * on (pod) group_left()\n label_replace(\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod),\n \"pod_name\",\n \"$1\", \n \"pod\", \n \"(.+)\"\n )\n) * 730\n\n#END CPU\n+\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Memory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nsum(\n avg(container_memory_allocation_bytes) by (pod,node) / 1024 / 1024 / 1024\n\n * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[10m])) by 
(node)\n\n * on (pod) group_left()\n label_replace(\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod),\n \"pod_name\",\n \"$1\", \n \"pod\", \n \"(.+)\"\n )\n) * 730\n\n# END MEMORY\n\n+\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ STORAGE ~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsum(\n sum(kube_persistentvolumeclaim_info{storageclass!=\".*ssd.*\"}) by (persistentvolumeclaim, storageclass)\n * on (persistentvolumeclaim) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim)\n * on (persistentvolumeclaim) group_left(label_app)\n max(kube_persistentvolumeclaim_labels{label_$label=~\"$label_value\"}) by (persistentvolumeclaim) or up * 0\n) / 1024 / 1024 /1024 * .04 \n\n+\n\nsum(\n sum(kube_persistentvolumeclaim_info{storageclass=~\".*ssd.*\"}) by (persistentvolumeclaim, storageclass)\n * on (persistentvolumeclaim) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim)\n * on (persistentvolumeclaim) group_left(label_app)\n max(kube_persistentvolumeclaim_labels{label_$label=~\"$label_value\"}) by (persistentvolumeclaim) or up * 0\n) / 1024 / 1024 /1024 * .17 \n\n\n# END STORAGE\n", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": " {{ node }}", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": "15m", + "timeShift": null, + "title": "Total Cost", + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 5 + }, + "id": 10, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n sum (kube_pod_container_resource_requests_cpu_cores) by (pod)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod)\n or up * 0\n) ", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "CPU Request", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 2, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 5 + }, + "id": 17, + "interval": null, + "links": [], + "mappingType": 1, + 
"mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n label_replace(\n sum(rate(container_cpu_usage_seconds_total{image!=\"\",container_name!=\"POD\"}[1h])) by (kubernetes_io_hostname,pod_name),\n \"node\",\n \"$1\", \n \"kubernetes_io_hostname\", \n \"(.+)\"\n ) \n * on (pod_name) group_left()\n label_replace(\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod),\n \"pod_name\",\n \"$1\", \n \"pod\", \n \"(.+)\"\n ) or up * 0\n) ", + "format": "time_series", + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "", + "title": "CPU Used", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 0, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 5 + }, + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n sum (kube_pod_container_resource_requests_memory_bytes) by (pod)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod)\n or up * 0\n) ", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Memory Request", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 5 + }, + "id": 18, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + 
"sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n label_replace(\n sum (container_memory_working_set_bytes{pod_name!=\"\",container!=\"POD\",container!=\"\"}) by (pod_name),\n \"pod\",\n \"$1\", \n \"pod_name\", \n \"(.+)\")\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod)\n or up * 0\n)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Memory Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "default-kubecost", + "decimals": 0, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 5 + }, + "id": 22, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(\n max(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, storageclass)\n * on (persistentvolumeclaim) group_right(storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes) by (persistentvolumeclaim)\n * on (persistentvolumeclaim) group_left(label_app)\n max(kube_persistentvolumeclaim_labels{label_$label=~\"$label_value\"}) by (persistentvolumeclaim) or up * 0\n) \n", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Storage Request", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n sum (kube_pod_container_resource_limits_cpu_cores) by (pod, container)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container),\n \"container_name\",\n \"$1\", \n \"container\", \n \"(.+)\"\n )\n) \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "limit", + "refId": "C" + }, + { + "expr": "sum(\n label_replace(\n sum 
(kube_pod_container_resource_requests_cpu_cores) by (pod, container)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container),\n \"container_name\",\n \"$1\", \n \"container\", \n \"(.+)\"\n )\n) \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "request", + "refId": "B" + }, + { + "expr": "sum(\n label_replace(\n sum (rate (container_cpu_usage_seconds_total{image!=\"\",container!=\"POD\",container!=\"\"}[10m])) by (container,pod),\n \"pod\", \n \"$1\", \n \"pod_name\", \n \"(.+)\"\n )\n * on (pod) group_left (label_$label)\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container)\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "usage", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage vs Requests vs Limits", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 23, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n sum (kube_pod_container_resource_limits_memory_bytes) by (pod, container)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container),\n \"container_name\",\n \"$1\", \n \"container\", \n \"(.+)\"\n )\n) \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "limit", + "refId": "C" + }, + { + "expr": "sum(\n label_replace(\n sum (kube_pod_container_resource_requests_memory_bytes) by (pod, container)\n * on (pod) group_left()\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container),\n \"container_name\",\n \"$1\", \n \"container\", \n \"(.+)\"\n )\n) \n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "request", + "refId": "B" + }, + { + "expr": "sum(\n label_replace(\n sum (container_memory_working_set_bytes{container!=\"\",container!=\"POD\"}) by (container,pod),\n \"pod\", \n \"$1\", \n \"pod_name\", \n \"(.+)\"\n )\n * on (pod) group_left (label_$label)\n max(kube_pod_labels{label_$label=~\"$label_value\"}) by (pod,container)\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "usage", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage vs Requests vs Limits", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + 
"max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "cost", + "utilization", + "metrics" + ], + "templating": { + "list": [ + { + "datasource": "default-kubecost", + "filters": [], + "hide": 0, + "label": "", + "name": "Filters", + "skipUrlSync": false, + "type": "adhoc" + }, + { + "allValue": null, + "current": { + "tags": [], + "text": "app", + "value": "app" + }, + "hide": 0, + "includeAll": false, + "label": "Label", + "multi": false, + "name": "label", + "options": [ + { + "selected": false, + "text": "app", + "value": "app" + }, + { + "selected": false, + "text": "tier", + "value": "tier" + }, + { + "selected": false, + "text": "component", + "value": "component" + }, + { + "selected": true, + "text": "release", + "value": "release" + }, + { + "selected": false, + "text": "name", + "value": "name" + }, + { + "selected": false, + "text": "team", + "value": "team" + }, + { + "selected": false, + "text": "department", + "value": "department" + }, + { + "selected": false, + "text": "owner", + "value": "owner" + }, + { + "selected": false, + "text": "contact", + "value": "contact" + } + ], + "query": "app, tier, component, release, name, team, department, owner, contact", + "skipUrlSync": false, + "type": "custom" + }, + { + "allValue": ".*", + "current": { + "tags": [], + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": "Value", + "multi": false, + "name": "label_value", + "options": [], + "query": "query_result(SUM(kube_pod_labels{label_$label!=\"\",namespace!=\"kube-system\"}) by (label_$label))", + "refresh": 1, + "regex": "/label_$label=\\\"(.*?)(\\\")/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "()", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": "", + "multi": false, + "name": "Deployments", + "options": [], + "query": "label_values(deployment)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "tags": [], + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "Secondary", + "options": [ + { + "selected": true, + "text": "All", + "value": "$__all" + }, + { + "selected": false, + "text": "app", + "value": "app" + }, + { + "selected": false, + "text": "component", + "value": "component" + }, + { + "selected": false, + "text": "controller_revision_hash", + "value": "controller_revision_hash" + }, + { + "selected": false, + "text": "k8s_app", + "value": "k8s_app" + } + ], + "query": "query_result(kube_pod_labels)", + "refresh": 0, + "regex": "/.+?label_([^=]*).*/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + 
"5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Label costs & utilization", + "uid": "lWMhIA-ik", + "version": 2 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/namespace-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/namespace-utilization.json new file mode 100644 index 000000000..7260cced1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/namespace-utilization.json @@ -0,0 +1,1156 @@ +{ + "annotations":{ + "list":[ + { + "builtIn":1, + "datasource":"-- Grafana --", + "enable":true, + "hide":true, + "iconColor":"rgba(0, 211, 255, 1)", + "name":"Annotations & Alerts", + "type":"dashboard" + } + ] + }, + "description":"A dashboard to help with utilization and resource allocation", + "editable":true, + "gnetId":8673, + "graphTooltip":0, + "id":9, + "iteration":1553150922105, + "links":[ + + ], + "panels":[ + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "fontSize":"100%", + "gridPos":{ + "h":9, + "w":16, + "x":0, + "y":0 + }, + "hideTimeOverride":true, + "id":73, + "links":[ + + ], + "pageSize":8, + "repeat":null, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":2, + "desc":false + }, + "styles":[ + { + "alias":"Pod", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":false, + "linkTooltip":"", + "linkUrl":"", + "pattern":"pod_name", + "thresholds":[ + "30", + "80" + ], + "type":"string", + "unit":"currencyUSD" + }, + { + "alias":"RAM", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "pattern":"Value #B", + "thresholds":[ + + ], + "type":"number", + "unit":"decbytes" + }, + { + "alias":"CPU %", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + + ], + "type":"number", + "unit":"percent" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"Storage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"Total", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #D", + "thresholds":[ + + ], + "type":"number", + "unit":"currencyUSD" + }, + { + "alias":"CPU Utilization", + "colorMode":"value", + "colors":[ + "#bf1b00", + "rgba(50, 172, 45, 0.97)", + "#ef843c" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #E", + "thresholds":[ + "30", + "80" + ], + "type":"number", + "unit":"percent" + }, + { + "alias":"RAM Utilization", + "colorMode":"value", + "colors":[ + "rgba(245, 
54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#ef843c" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #F", + "thresholds":[ + "30", + "80" + ], + "type":"number", + "unit":"percent" + } + ], + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{namespace=\"$namespace\"}[10m])) by (pod_name) * 100", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"{{ pod_name }}", + "refId":"A" + }, + { + "expr":"sum (avg_over_time (container_memory_working_set_bytes{namespace=\"$namespace\", container_name!=\"POD\"}[10m])) by (pod_name)", + "format":"table", + "hide":false, + "instant":true, + "intervalFactor":1, + "legendFormat":"{{ pod_name }}", + "refId":"B" + } + ], + "timeFrom":"1M", + "timeShift":null, + "title":"Pod utilization analysis", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "fontSize":"100%", + "gridPos":{ + "h":9, + "w":8, + "x":16, + "y":0 + }, + "hideTimeOverride":true, + "id":90, + "links":[ + + ], + "pageSize":8, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":4, + "desc":true + }, + "styles":[ + { + "alias":"Namespace", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"namespace", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"PVC Name", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"persistentvolumeclaim", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Storage Class", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"storageclass", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Size", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":1, + "mappingType":1, + "pattern":"Value", + "thresholds":[ + + ], + "type":"number", + "unit":"gbytes" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + } + ], + "targets":[ + { + "expr":"sum (\n sum(kube_persistentvolumeclaim_info) by (persistentvolumeclaim, namespace, storageclass)\n + on (persistentvolumeclaim, namespace) group_right (storageclass)\n sum(kube_persistentvolumeclaim_resource_requests_storage_bytes{namespace=~\"$namespace\"}) by (persistentvolumeclaim, namespace)\n) by (namespace,persistentvolumeclaim,storageclass) / 1024 / 1024 /1024 ", + "format":"table", + "hide":false, + "instant":true, + "interval":"", + "intervalFactor":1, + "legendFormat":"{{ persistentvolumeclaim }}", + "refId":"A" + } + ], + "timeFrom":null, + "timeShift":null, + "title":"Persistent Volume Claims", + "transform":"table", + 
"transparent":false, + "type":"table" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "description":"CPU requests by pod divided by the rate of CPU usage over the last hour", + "fill":1, + "gridPos":{ + "h":9, + "w":24, + "x":0, + "y":9 + }, + "id":100, + "legend":{ + "avg":false, + "current":false, + "max":false, + "min":false, + "show":true, + "total":false, + "values":false + }, + "lines":true, + "linewidth":1, + "links":[ + + ], + "nullPointMode":"null", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"topk(10,\n label_replace(\n sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\"}) by (pod),\n \"pod_name\", \n \"$1\", \n \"pod\", \n \"(.+)\"\n ) \n/ on (pod_name) sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\",pod_name=~\".+\"}[1h])) by (pod_name))", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":[ + + ], + "timeFrom":null, + "timeShift":null, + "title":"Ratio of CPU requests to usage (Top 10 pods)", + "tooltip":{ + "shared":true, + "sort":0, + "value_type":"individual" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":true + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":3, + "description":"This panel shows historical utilization as an average across all pods in this namespace. 
It only accounts for currently deployed pods", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":0, + "y":18 + }, + "height":"", + "id":94, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":false, + "current":false, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":false, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (rate (container_cpu_usage_seconds_total{namespace=\"$namespace\"}[10m])) by (namespace)\n", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"cpu utilization", + "metric":"container_cpu", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Overall CPU Utilization", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"percent", + "label":"", + "logBase":1, + "max":"110", + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"This panel shows historical utilization as an average across all pods in this namespace. 
It only accounts for currently deployed pods", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":12, + "y":18 + }, + "id":92, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":false, + "current":false, + "max":false, + "min":false, + "rightSide":false, + "show":false, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum (container_memory_working_set_bytes{namespace=\"$namespace\"})\n/\nsum(node_memory_MemTotal_bytes)", + "format":"time_series", + "instant":false, + "intervalFactor":1, + "legendFormat":"mem utilization", + "refId":"B" + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Overall RAM Utilization", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"percent", + "label":null, + "logBase":1, + "max":"110", + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"Traffic in and out of this namespace, as a sum of the pods within it", + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":0, + "y":24 + }, + "height":"", + "id":96, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":true, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_network_receive_bytes_total{namespace=\"$namespace\"}[10m])) by (namespace)", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"<- in", + "metric":"container_cpu", + "refId":"A", + "step":10 + }, + { + "expr":"- sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\"}[10m])) by (namespace)", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"-> out", + "refId":"B" + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Network IO", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":"", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + 
"show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"Disk reads and writes for the namespace, as a sum of the pods within it", + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":12, + "y":24 + }, + "height":"", + "id":98, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":true, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_fs_writes_bytes_total{namespace=\"$namespace\"}[10m])) by (namespace)", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"<- write", + "metric":"container_cpu", + "refId":"A", + "step":10 + }, + { + "expr":"- sum (rate (container_fs_reads_bytes_total{namespace=\"$namespace\"}[10m])) by (namespace)", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"-> read", + "refId":"B" + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Disk IO", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":"", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "refresh":"10s", + "schemaVersion":16, + "style":"dark", + "tags":[ + "cost", + "utilization", + "metrics" + ], + "templating":{ + "list":[ + { + "current":{ + "text":"23.06", + "value":"23.06" + }, + "hide":0, + "label":"CPU", + "name":"costcpu", + "options":[ + { + "text":"23.06", + "value":"23.06" + } + ], + "query":"23.06", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"7.28", + "value":"7.28" + }, + "hide":0, + "label":"PE CPU", + "name":"costpcpu", + "options":[ + { + "text":"7.28", + "value":"7.28" + } + ], + "query":"7.28", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"3.25", + "value":"3.25" + }, + "hide":0, + "label":"RAM", + "name":"costram", + "options":[ + { + "text":"3.25", + "value":"3.25" + } + ], + "query":"3.25", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"0.6862", + "value":"0.6862" + }, + "hide":0, + "label":"PE RAM", + "name":"costpram", + "options":[ + { + "text":"0.6862", + "value":"0.6862" + } + ], + "query":"0.6862", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"0.04", + "value":"0.04" + }, + "hide":0, + "label":"Storage", + "name":"costStorageStandard", + "options":[ + { + "text":"0.04", + "value":"0.04" + } + ], + "query":"0.04", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":".17", + "value":".17" + }, + "hide":0, 
+ "label":"SSD", + "name":"costStorageSSD", + "options":[ + { + "text":".17", + "value":".17" + } + ], + "query":".17", + "skipUrlSync":false, + "type":"constant" + }, + { + "current":{ + "text":"30", + "value":"30" + }, + "hide":0, + "label":"Disc.", + "name":"costDiscount", + "options":[ + { + "text":"30", + "value":"30" + } + ], + "query":"30", + "skipUrlSync":false, + "type":"constant" + }, + { + "allValue":null, + "current":{ + "text":"kube-system", + "value":"kube-system" + }, + "datasource":"default-kubecost", + "hide":0, + "includeAll":false, + "label":"NS", + "multi":false, + "name":"namespace", + "options":[ + + ], + "query":"query_result(sum(kube_namespace_created{namespace!=\"\"}) by (namespace))", + "refresh":1, + "regex":"/namespace=\\\"(.*?)(\\\")/", + "skipUrlSync":false, + "sort":0, + "tagValuesQuery":"", + "tags":[ + + ], + "tagsQuery":"", + "type":"query", + "useTags":false + }, + { + "datasource":"default-kubecost", + "filters":[ + + ], + "hide":0, + "label":"", + "name":"Filters", + "skipUrlSync":false, + "type":"adhoc" + } + ] + }, + "time":{ + "from":"now-15m", + "to":"now" + }, + "timepicker":{ + "hidden":false, + "refresh_intervals":[ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options":[ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone":"browser", + "title":"Namespace utilization metrics", + "uid":"at-cost-analysis-namespace2", + "version":1 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/node-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/node-utilization.json new file mode 100644 index 000000000..ce751c199 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/node-utilization.json @@ -0,0 +1,1370 @@ +{ + "annotations":{ + "list":[ + { + "builtIn":1, + "datasource":"-- Grafana --", + "enable":true, + "hide":true, + "iconColor":"rgba(0, 211, 255, 1)", + "name":"Annotations & Alerts", + "type":"dashboard" + } + ] + }, + "editable":true, + "gnetId":null, + "graphTooltip":0, + "id":6, + "iteration":1557245882378, + "links":[ + + ], + "panels":[ + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"percentunit", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":7, + "w":8, + "x":0, + "y":0 + }, + "id":2, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum(irate(container_cpu_usage_seconds_total{id=\"/\",instance=\"$node\"}[10m]))", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"CPU Usage", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"avg" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + 
"rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"percentunit", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":7, + "w":8, + "x":8, + "y":0 + }, + "id":3, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"SUM(container_memory_usage_bytes{namespace!=\"\",instance=\"$node\"}) / SUM(kube_node_status_capacity_memory_bytes{node=\"$node\"})", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"Memory Usage", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"avg" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"percentunit", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":true, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":7, + "w":8, + "x":16, + "y":0 + }, + "id":4, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum(container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",instance=\"$node\"}) /\nsum(container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",instance=\"$node\"})", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"Storage Usage", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"avg" + }, + { + "columns":[ + { + "text":"Avg", + "value":"avg" + } + ], + "datasource":"default-kubecost", + "fontSize":"100%", + "gridPos":{ + "h":8, + "w":16, + "x":0, + "y":7 + }, + "hideTimeOverride":true, + "id":21, + "links":[ + + ], + "pageSize":8, + "repeat":null, + "repeatDirection":"v", + "scroll":true, + "showHeader":true, + "sort":{ + "col":4, + "desc":true + }, + "styles":[ + { + "alias":"Pod", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "link":false, + "linkTooltip":"", + "linkUrl":"", + "pattern":"pod_name", + "thresholds":[ + "30", + "80" + ], + "type":"string", + "unit":"currencyUSD" + }, + { + "alias":"", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 
45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Time", + "thresholds":[ + + ], + "type":"hidden", + "unit":"short" + }, + { + "alias":"CPU Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #C", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"CPU Request", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #A", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"CPU Limit", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #B", + "thresholds":[ + + ], + "type":"number", + "unit":"short" + }, + { + "alias":"Mem Usage", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #D", + "thresholds":[ + + ], + "type":"number", + "unit":"bytes" + }, + { + "alias":"Mem Request", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #E", + "thresholds":[ + + ], + "type":"number", + "unit":"bytes" + }, + { + "alias":"Mem Limit", + "colorMode":null, + "colors":[ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat":"YYYY-MM-DD HH:mm:ss", + "decimals":2, + "mappingType":1, + "pattern":"Value #F", + "thresholds":[ + + ], + "type":"number", + "unit":"bytes" + } + ], + "targets":[ + { + "expr":"sum(rate(container_cpu_usage_seconds_total{container_name!=\"\",container_name!=\"POD\",pod_name!=\"\",instance=\"$node\"}[24h])) by (pod_name)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"C" + }, + { + "expr":"sum(label_replace(\nsum(avg_over_time(kube_pod_container_resource_requests_cpu_cores{container!=\"\",container!=\"POD\",node=\"$node\"}[24h])) by (pod), \n\"pod_name\",\"$1\",\"pod\",\"(.+)\")\nor up * 0\n) by (pod_name)", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"A" + }, + { + "expr":"sum(avg_over_time(container_memory_usage_bytes{container_name!=\"\",container_name!=\"POD\",pod_name!=\"\",instance=\"$node\"}[24h])) by (pod_name)\n", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"D" + }, + { + "expr":"sum(label_replace(label_replace(\nsum(avg_over_time(kube_pod_container_resource_requests_memory_bytes{container!=\"\",container!=\"POD\",node=\"$node\"}[24h])) by (pod),\n\"container_name\",\"$1\",\"container\",\"(.+)\"), \"pod_name\",\"$1\",\"pod\",\"(.+)\")\nor up * 0\n) by (pod_name)\n", + "format":"table", + "instant":true, + "intervalFactor":1, + "refId":"E" + } + ], + "timeFrom":"1M", + "timeShift":null, + "title":"Current pods", + "transform":"table", + "transparent":false, + "type":"table" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + 
"datasource":"default-kubecost", + "decimals":0, + "format":"none", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":4, + "x":16, + "y":7 + }, + "id":8, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum(\n count(avg_over_time(kube_pod_container_resource_requests_cpu_cores{container!=\"\",container!=\"POD\",node=\"$node\"}[24h])) by (pod)\n * on (pod) group_right()\n sum(kube_pod_container_status_running) by (pod)\n)", + "format":"time_series", + "instant":true, + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"Pods Running", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":4, + "x":20, + "y":7 + }, + "id":18, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"sum(container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",instance=\"$node\"})", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"Storage Capacity", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"none", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":4, + "x":16, + "y":11 + }, + "id":9, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + 
"full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"kube_node_status_capacity_cpu_cores{node=\"$node\"}", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"CPU Capacity", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"avg" + }, + { + "cacheTimeout":null, + "colorBackground":false, + "colorValue":false, + "colors":[ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource":"default-kubecost", + "format":"bytes", + "gauge":{ + "maxValue":100, + "minValue":0, + "show":false, + "thresholdLabels":false, + "thresholdMarkers":true + }, + "gridPos":{ + "h":4, + "w":4, + "x":20, + "y":11 + }, + "id":19, + "interval":null, + "links":[ + + ], + "mappingType":1, + "mappingTypes":[ + { + "name":"value to text", + "value":1 + }, + { + "name":"range to text", + "value":2 + } + ], + "maxDataPoints":100, + "nullPointMode":"connected", + "nullText":null, + "postfix":"", + "postfixFontSize":"50%", + "prefix":"", + "prefixFontSize":"50%", + "rangeMaps":[ + { + "from":"null", + "text":"N/A", + "to":"null" + } + ], + "sparkline":{ + "fillColor":"rgba(31, 118, 189, 0.18)", + "full":false, + "lineColor":"rgb(31, 120, 193)", + "show":false + }, + "tableColumn":"", + "targets":[ + { + "expr":"kube_node_status_capacity_memory_bytes{node=\"$node\"}", + "format":"time_series", + "intervalFactor":1, + "refId":"A" + } + ], + "thresholds":"", + "title":"RAM Capacity", + "type":"singlestat", + "valueFontSize":"80%", + "valueMaps":[ + { + "op":"=", + "text":"N/A", + "value":"null" + } + ], + "valueName":"current" + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":3, + "description":"This panel shows historical utilization for the node.", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":0, + "y":15 + }, + "height":"", + "id":11, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":false, + "current":false, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":false, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"sum(irate(container_cpu_usage_seconds_total{id=\"/\",instance=\"$node\"}[10m]))", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"cpu utilization", + "metric":"container_cpu", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"CPU Utilization", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"percentunit", + "label":"", + "logBase":1, + "max":"1.1", + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + 
{ + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"This panel shows historical utilization for the node.", + "editable":true, + "error":false, + "fill":0, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":12, + "y":15 + }, + "id":13, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":false, + "current":false, + "max":false, + "min":false, + "rightSide":false, + "show":false, + "sideWidth":200, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":true, + "targets":[ + { + "expr":"SUM(container_memory_usage_bytes{namespace!=\"\",instance=\"$node\"}) / SUM(kube_node_status_capacity_memory_bytes{node=\"$node\"})", + "format":"time_series", + "instant":false, + "interval":"10s", + "intervalFactor":1, + "legendFormat":"ram utilization", + "metric":"container_memory_usage:sort_desc", + "refId":"A", + "step":10 + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"RAM Utilization", + "tooltip":{ + "msResolution":false, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "decimals":null, + "format":"percentunit", + "label":null, + "logBase":1, + "max":"1.1", + "min":"0", + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"Traffic in and out of this namespace, as a sum of the pods within it", + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":0, + "y":21 + }, + "height":"", + "id":15, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":true, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_network_receive_bytes_total{instance=\"$node\"}[10m]))", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"<- in", + "metric":"container_cpu", + "refId":"A", + "step":10 + }, + { + "expr":"- sum (rate (container_network_transmit_bytes_total{instance=\"$node\"}[10m]))", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"-> out", + "refId":"B" + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Network IO", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + 
"values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":"", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + }, + { + "aliasColors":{ + + }, + "bars":false, + "dashLength":10, + "dashes":false, + "datasource":"default-kubecost", + "decimals":2, + "description":"Disk reads and writes for the namespace, as a sum of the pods within it", + "editable":true, + "error":false, + "fill":1, + "grid":{ + + }, + "gridPos":{ + "h":6, + "w":12, + "x":12, + "y":21 + }, + "height":"", + "id":17, + "isNew":true, + "legend":{ + "alignAsTable":false, + "avg":true, + "current":true, + "hideEmpty":false, + "hideZero":false, + "max":false, + "min":false, + "rightSide":false, + "show":true, + "sideWidth":null, + "sort":"current", + "sortDesc":true, + "total":false, + "values":true + }, + "lines":true, + "linewidth":2, + "links":[ + + ], + "nullPointMode":"connected", + "percentage":false, + "pointradius":5, + "points":false, + "renderer":"flot", + "seriesOverrides":[ + + ], + "spaceLength":10, + "stack":false, + "steppedLine":false, + "targets":[ + { + "expr":"sum (rate (container_fs_writes_bytes_total{instance=\"$node\"}[10m]))", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"<- write", + "metric":"container_cpu", + "refId":"A", + "step":10 + }, + { + "expr":"- sum (rate (container_fs_reads_bytes_total{instance=\"$node\"}[10m]))", + "format":"time_series", + "hide":false, + "instant":false, + "interval":"", + "intervalFactor":1, + "legendFormat":"-> read", + "refId":"B" + } + ], + "thresholds":[ + + ], + "timeFrom":"", + "timeShift":null, + "title":"Disk IO", + "tooltip":{ + "msResolution":true, + "shared":true, + "sort":2, + "value_type":"cumulative" + }, + "type":"graph", + "xaxis":{ + "buckets":null, + "mode":"time", + "name":null, + "show":true, + "values":[ + + ] + }, + "yaxes":[ + { + "format":"Bps", + "label":"", + "logBase":1, + "max":null, + "min":null, + "show":true + }, + { + "format":"short", + "label":null, + "logBase":1, + "max":null, + "min":null, + "show":false + } + ], + "yaxis":{ + "align":false, + "alignLevel":null + } + } + ], + "schemaVersion":16, + "style":"dark", + "tags":[ + "cost", + "utilization", + "metrics" + ], + "templating":{ + "list":[ + { + "allValue":null, + "current":{ + "text":"ip-172-20-44-170.us-east-2.compute.internal", + "value":"ip-172-20-44-170.us-east-2.compute.internal" + }, + "datasource":"default-kubecost", + "hide":0, + "includeAll":false, + "label":null, + "multi":false, + "name":"node", + "options":[ + + ], + "query":"query_result(kube_node_labels)", + "refresh":1, + "regex":"/node=\\\"(.*?)(\\\")/", + "skipUrlSync":false, + "sort":0, + "tagValuesQuery":"", + "tags":[ + + ], + "tagsQuery":"", + "type":"query", + "useTags":false + } + ] + }, + "time":{ + "from":"now-6h", + "to":"now" + }, + "timepicker":{ + "refresh_intervals":[ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options":[ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone":"", + "title":"Node utilization metrics", + "uid":"NUQW37Lmk", + "version":1 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/pod-utilization.json b/charts/kubecost/cost-analyzer/1.70.000/pod-utilization.json new file mode 100644 index 000000000..4e0abff2d --- /dev/null +++ 
b/charts/kubecost/cost-analyzer/1.70.000/pod-utilization.json @@ -0,0 +1,798 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Visualize your kubernetes costs at the pod level.", + "editable": true, + "gnetId": 9063, + "graphTooltip": 0, + "id": 4, + "iteration": 1560100821196, + "links": [], + "panels": [ + { + "columns": [ + { + "text": "Avg", + "value": "avg" + } + ], + "datasource": "default-kubecost", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 24, + "x": 0, + "y": 0 + }, + "hideTimeOverride": true, + "id": 98, + "links": [], + "pageSize": 5, + "repeatDirection": "v", + "scroll": true, + "showHeader": true, + "sort": { + "col": 6, + "desc": true + }, + "styles": [ + { + "alias": "Container", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", + "#c15c17" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "pattern": "container_name", + "thresholds": [ + "30", + "80" + ], + "type": "string", + "unit": "currencyUSD" + }, + { + "alias": "Memory Allocation", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "CPU Allocation", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "none" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Time", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Memory ($/hour)", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "currencyUSD" + }, + { + "alias": "Spot/PE RAM", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "currencyUSD" + }, + { + "alias": "Total", + "colorMode": null, + "colors": [ + "#bf1b00", + "rgba(50, 172, 45, 0.97)", + "#ef843c" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [ + "" + ], + "type": "number", + "unit": "currencyUSD" + } + ], + "targets": [ + { + "expr": "sum(\n avg_over_time(container_memory_allocation_bytes{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range])\n) by (container,node)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "B" + }, + { + "expr": "sum(\n avg_over_time(container_cpu_allocation{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range])\n or up * 0 \n) by (container,node)", + "format": 
"table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "1M", + "timeShift": null, + "title": "Container cost & allocation analysis", + "transform": "table", + "transparent": false, + "type": "table" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 3, + "description": "This graph attempts to show you CPU use of your application vs its requests", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 5 + }, + "height": "", + "id": 94, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg (rate (container_cpu_usage_seconds_total{namespace=~\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\",container_name!=\"\"}[10m])) by (container_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ container_name }} (usage)", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "expr": "avg(kube_pod_container_resource_requests_cpu_cores{namespace=~\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{ container}} (request)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": "", + "timeShift": null, + "title": "CPU Usage vs Requested", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 3, + "description": "This graph attempts to show you RAM use of your application vs its requests", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 5 + }, + "height": "", + "id": 96, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "avg (avg_over_time 
(container_memory_working_set_bytes{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\",container_name!=\"\"}[1m])) by (container_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ container_name }} (usage)", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "expr": "avg(kube_pod_container_resource_requests_memory_bytes{namespace=~\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", + "format": "time_series", + "hide": false, + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{ container }} (requested)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": "", + "timeShift": null, + "title": "RAM Usage vs Requested", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "description": "Traffic in and out of this pod, as a sum of its containers", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 12 + }, + "height": "", + "id": 95, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=\"$pod\"}[10m])) by (pod_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "<- in", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "expr": "- avg (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=\"$pod\"}[10m])) by (pod_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "-> out", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": "", + "timeShift": null, + "title": "Network IO", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"default-kubecost", + "decimals": 2, + "description": "Disk read writes", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 12 + }, + "height": "", + "id": 97, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (rate (container_fs_writes_bytes_total{namespace=\"$namespace\",pod_name=\"$pod\"}[10m])) by (pod_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "<- write", + "metric": "container_cpu", + "refId": "A", + "step": 10 + }, + { + "expr": "- avg (rate (container_fs_reads_bytes_total{namespace=\"$namespace\",pod_name=\"$pod\"}[10m])) by (pod_name)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "-> read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": "", + "timeShift": null, + "title": "Disk IO", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "cost", + "utilization", + "metrics" + ], + "templating": { + "list": [ + { + "current": { + "text": "0.044", + "value": "0.044" + }, + "hide": 0, + "label": "Storage", + "name": "costStorageStandard", + "options": [ + { + "text": "0.044", + "value": "0.044" + } + ], + "query": "0.044", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "text": "0.187", + "value": "0.187" + }, + "hide": 0, + "label": "SSD", + "name": "costStorageSSD", + "options": [ + { + "text": "0.187", + "value": "0.187" + } + ], + "query": "0.187", + "skipUrlSync": false, + "type": "constant" + }, + { + "current": { + "text": "30", + "value": "30" + }, + "hide": 0, + "label": "Disc.", + "name": "costDiscount", + "options": [ + { + "text": "30", + "value": "30" + } + ], + "query": "30", + "skipUrlSync": false, + "type": "constant" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "kubecost", + "value": "kubecost" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": false, + "label": "NS", + "multi": false, + "name": "namespace", + "options": [], + "query": "query_result(sum(container_memory_working_set_bytes{namespace!=\"\"}) by (namespace))", + "refresh": 1, + "regex": "/namespace=\\\"(.*?)(\\\")/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "tags": [], + 
"text": "kubecost-grafana-5cc9f5bf6-7kmgl", + "value": "kubecost-grafana-5cc9f5bf6-7kmgl" + }, + "datasource": "default-kubecost", + "hide": 0, + "includeAll": false, + "label": "Pod", + "multi": false, + "name": "pod", + "options": [], + "query": "query_result(sum(container_memory_working_set_bytes{namespace=\"$namespace\"}) by (pod_name))", + "refresh": 1, + "regex": "/pod_name=\\\"(.*?)(\\\")/", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "hidden": false, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Pod cost & utilization metrics", + "uid": "at-cost-analysis-pod", + "version": 1 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/prom-benchmark.json b/charts/kubecost/cost-analyzer/1.70.000/prom-benchmark.json new file mode 100644 index 000000000..83c778bd5 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/prom-benchmark.json @@ -0,0 +1,5670 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Metrics useful for benchmarking and loadtesting Prometheus itself. Designed primarily for Prometheus 2.17.x.", + "editable": true, + "gnetId": 12054, + "graphTooltip": 1, + "id": 9, + "iteration": 1603144824023, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 49, + "panels": [], + "title": "Basics", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 40, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_build_info{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{version}} - {{revision}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Prometheus Version", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + 
"aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 1 + }, + "hiddenSeries": false, + "id": 72, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "time() - process_start_time_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Age", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Uptime", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "dtdurations", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 1 + }, + "hiddenSeries": false, + "id": 107, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "time() - prometheus_config_last_reload_success_timestamp_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Age", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Last Successful Config Reload", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "dtdurations", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 46, + "panels": [], + "title": "Ingestion", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "Time series": 
"#70dbed" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_series{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Time series", + "metric": "prometheus_local_storage_memory_series", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head Time series", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 9 + }, + "hiddenSeries": false, + "id": 26, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_active_appenders{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Head Appenders", + "metric": "prometheus_local_storage_memory_series", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head Active Appenders", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + 
"aliasColors": { + "samples/s": "#e5a8e2" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 9 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_head_samples_appended_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "samples/s", + "metric": "prometheus_local_storage_ingested_samples_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Samples Appended/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "To persist": "#9AC48A" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/Max.*/", + "fill": 0 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_chunks{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Chunks", + "metric": "prometheus_local_storage_memory_chunks", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head Chunks", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 16 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_head_chunks_created_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Created", + "metric": "prometheus_local_storage_chunk_ops_total", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_head_chunks_removed_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Removed", + "metric": "prometheus_local_storage_chunk_ops_total", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head Chunks/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "Removed": "#e5ac0e" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 16 + }, + "hiddenSeries": false, + "id": 25, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_isolation_high_watermark{job=\"prometheus\",instance=\"$Prometheus:9090\"} - prometheus_tsdb_isolation_low_watermark{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Difference", + 
"metric": "prometheus_local_storage_chunk_ops_total", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Isolation Watermarks", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 52, + "panels": [], + "title": "Compaction", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max": "#447ebc", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "Min": "#447ebc", + "Now": "#7eb26d" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 28, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Max", + "fillBelowTo": "Min", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_min_time{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Min", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + }, + { + "expr": "time() * 1000", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Now", + "refId": "C" + }, + { + "expr": "prometheus_tsdb_head_max_time{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Max", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head Time Range", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "dateTimeAsIso", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 24 + }, + "hiddenSeries": false, + "id": 29, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_head_gc_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "GC Time/s", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Head GC Time/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 24 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Queue length", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_blocks_loaded{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Blocks Loaded", + "metric": "prometheus_local_storage_indexing_batch_sizes_sum", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Blocks Loaded", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + 
} + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Failed Compactions": "#bf1b00", + "Failed Reloads": "#bf1b00", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 31 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_reloads_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Reloads", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "TSDB Reloads/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Failed Compactions": "#bf1b00", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 31 + }, + "hiddenSeries": false, + "id": 31, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_wal_fsync_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m]) / rate(prometheus_tsdb_wal_fsync_duration_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Fsync Latency", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_wal_truncate_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m]) / 
rate(prometheus_tsdb_wal_truncate_duration_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Truncate Latency", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "WAL Fsync&Truncate Latency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Failed Compactions": "#bf1b00", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "{instance=\"demo.robustperception.io:9090\",job=\"prometheus\"}": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 31 + }, + "hiddenSeries": false, + "id": 32, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_wal_corruptions_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "WAL Corruptions", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_reloads_failures_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Reload Failures", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "B", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_head_series_not_found{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Head Series Not Found", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "C", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_compactions_failed_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Compaction Failures", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "D", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_retention_cutoffs_failures_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Retention Cutoff 
Failures", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "E", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_checkpoint_creations_failed_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "WAL Checkpoint Creation Failures", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "F", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_checkpoint_deletions_failed_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "WAL Checkpoint Deletion Failures", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "G", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "TSDB Problems/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Failed Compactions": "#bf1b00", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_compactions_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Compactions", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Compactions/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + 
"dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 38 + }, + "hiddenSeries": false, + "id": 33, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_compaction_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Compaction Time/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Allocated bytes": "#F9BA8F", + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "RSS": "#890F02" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 38 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_time_retentions_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Time Cutoffs", + "metric": "last", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(prometheus_tsdb_size_retentions_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Size Cutoffs", + "metric": "last", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Retention Cutoffs/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + 
"label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 45 + }, + "hiddenSeries": false, + "id": 27, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_compaction_chunk_range_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m]) / rate(prometheus_tsdb_compaction_chunk_range_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Chunk Time Range", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "First Compaction, Avg Chunk Time Range", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "dtdurationms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 45 + }, + "hiddenSeries": false, + "id": 35, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_compaction_chunk_size_bytes_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m]) / rate(prometheus_tsdb_compaction_chunk_samples_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Bytes/Sample", + 
"metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "First Compaction, Avg Bytes/Sample", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 45 + }, + "hiddenSeries": false, + "id": 34, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_compaction_chunk_samples_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m]) / rate(prometheus_tsdb_compaction_chunk_samples_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[10m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Chunk Samples", + "metric": "prometheus_local_storage_series_chunks_persisted_count", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "First Compaction, Avg Chunk Samples", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 55, + "panels": [], + "title": "Resource Usage", + "type": "row" + }, + { + "aliasColors": { + "Allocated bytes": "#7EB26D", + "Allocated bytes - 1m max": "#BF1B00", + "Allocated bytes - 1m min": "#BF1B00", + "Allocated bytes - 5m max": "#BF1B00", + "Allocated bytes - 5m min": "#BF1B00", + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "RSS": "#447EBC" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": null, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + 
"custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 53 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/-/", + "fill": 0 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "intervalFactor": 2, + "legendFormat": "RSS", + "metric": "process_resident_memory_bytes", + "refId": "B", + "step": 10 + }, + { + "expr": "prometheus_local_storage_target_heap_size_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "intervalFactor": 2, + "legendFormat": "Target heap size", + "metric": "go_memstats_alloc_bytes", + "refId": "D", + "step": 10 + }, + { + "expr": "go_memstats_next_gc_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "intervalFactor": 2, + "legendFormat": "Next GC", + "metric": "go_memstats_next_gc_bytes", + "refId": "C", + "step": 10 + }, + { + "expr": "go_memstats_alloc_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "intervalFactor": 2, + "legendFormat": "Allocated", + "metric": "go_memstats_alloc_bytes", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Allocated bytes": "#F9BA8F", + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833", + "RSS": "#890F02" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 53 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(go_memstats_alloc_bytes_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Allocated Bytes/s", + "metric": "go_memstats_alloc_bytes", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Allocations", + 
"tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 53 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(process_cpu_seconds_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "intervalFactor": 2, + "legendFormat": "Irate", + "metric": "prometheus_local_storage_ingested_samples_total", + "refId": "A", + "step": 10 + }, + { + "expr": "rate(process_cpu_seconds_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[5m])", + "intervalFactor": 2, + "legendFormat": "5m rate", + "metric": "prometheus_local_storage_ingested_samples_total", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + "avg" + ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 60 + }, + "hiddenSeries": false, + "id": 70, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_symbol_table_size_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + 
"format": "time_series", + "intervalFactor": 2, + "legendFormat": "RAM Used", + "metric": "prometheus_local_storage_ingested_samples_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Symbol Tables Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + "avg" + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 60 + }, + "hiddenSeries": false, + "id": 71, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_storage_blocks_bytes_total{job=\"prometheus\",instance=\"$Prometheus:9090\"} or prometheus_tsdb_storage_blocks_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Disk Used", + "metric": "prometheus_local_storage_ingested_samples_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Block Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + "avg" + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Max": "#e24d42", + "Open": "#508642" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 60 + }, + "hiddenSeries": false, + "id": 41, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ 
+ { + "expr": "process_max_fds{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Max", + "refId": "A" + }, + { + "expr": "process_open_fds{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Open", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "File Descriptors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 91, + "panels": [], + "title": "Service Discovery", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 42, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_sd_discovered_targets{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}-{{config}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Discovered Targets", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 68 + }, + "hiddenSeries": false, + "id": 96, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + 
"targets": [ + { + "expr": "rate(prometheus_sd_updates_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[5m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Sent Updates/s", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 68 + }, + "hiddenSeries": false, + "id": 97, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_sd_received_updates_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[5m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Received Updates/s", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 75 + }, + "id": 99, + "panels": [], + "title": "Scraping", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 76 + }, + "hiddenSeries": false, + "id": 105, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_target_interval_length_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m]) / 
rate(prometheus_target_interval_length_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{interval}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Scrape Interval", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 76 + }, + "hiddenSeries": false, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_target_scrapes_exceeded_sample_limit_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Exceeded Sample Limit", + "refId": "A" + }, + { + "expr": "rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Duplicate Timestamp", + "refId": "C" + }, + { + "expr": "rate(prometheus_target_scrapes_sample_out_of_bounds_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Out Of Bounds ", + "refId": "D" + }, + { + "expr": "rate(prometheus_target_scrapes_sample_out_of_order_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Out of Order", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Scrape Problems/s", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 76 
+ }, + "hiddenSeries": false, + "id": 95, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_target_metadata_cache_bytes{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{scrape_job}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Metadata Cache Size", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 83 + }, + "id": 63, + "panels": [], + "title": "Query Engine", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "Time spent in each mode, per second", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 84 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_engine_query_duration_seconds_sum{job=\"prometheus\",}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{slice}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Query engine timings/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": 
"#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 84 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_rule_group_iterations_missed_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m]) ", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Rule group missed", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "B", + "step": 10 + }, + { + "expr": "rate(prometheus_rule_evaluation_failures_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Rule evals failed", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rule group evaulation problems/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 84 + }, + "hiddenSeries": false, + "id": 23, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_rule_group_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Rule evaluation duration", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Evaluation time of rule groups/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": 
"individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 91 + }, + "id": 77, + "panels": [ + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 92 + }, + "hiddenSeries": false, + "id": 86, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_notifications_sent_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{alertmanager}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notification Sent/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 92 + }, + "hiddenSeries": false, + "id": 87, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_notifications_errors_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m]) / 
rate(prometheus_notifications_sent_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{alertmanager}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notification Error Ratio", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 92 + }, + "hiddenSeries": false, + "id": 81, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_notifications_latency_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m]) / rate(prometheus_notifications_latency_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{alertmanager}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notification Latency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 99 + }, + "hiddenSeries": false, + "id": 85, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_notifications_alertmanagers_discovered{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Alertmanagers", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Alertmanagers Discovered", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 99 + }, + "hiddenSeries": false, + "id": 89, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_notifications_dropped_total{job=\"prometheus\",instance=\"$Prometheus:9090\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Dropped", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notifications Dropped/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": 
{ + "h": 7, + "w": 8, + "x": 16, + "y": 99 + }, + "hiddenSeries": false, + "id": 88, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_notifications_queue_length{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Pending", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + }, + { + "expr": "prometheus_notifications_queue_capacity{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Max", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Notification Queue", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Notification", + "type": "row" + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 92 + }, + "id": 58, + "panels": [], + "title": "HTTP Server", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 93 + }, + "hiddenSeries": false, + "id": 38, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_http_request_duration_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{handler}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "HTTP requests/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 93 + }, + "hiddenSeries": false, + "id": 37, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_http_request_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m]) / rate(prometheus_http_request_duration_seconds_count{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{handler}}", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "HTTP request latency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "description": "", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 93 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_http_request_duration_seconds_sum{job=\"prometheus\",instance=\"$Prometheus:9090\"}[2m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{handler}}", + "metric": 
"prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Time spent in HTTP requests/s", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 100 + }, + "id": 61, + "panels": [], + "repeat": "RuleGroup", + "scopedVars": { + "RuleGroup": { + "selected": false, + "text": "/etc/config/rules;CPU", + "value": "/etc/config/rules;CPU" + } + }, + "title": "Rule Group: $RuleGroup", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Interval": "#890f02", + "Last Duration": "#f9934e", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 101 + }, + "hiddenSeries": false, + "id": 43, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "repeatDirection": "h", + "scopedVars": { + "RuleGroup": { + "selected": false, + "text": "/etc/config/rules;CPU", + "value": "/etc/config/rules;CPU" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_rule_group_interval_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Interval", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + }, + { + "expr": "prometheus_rule_group_last_duration_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Last Duration", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$RuleGroup: Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": 
"#1F78C1", + "Chunks to persist": "#508642", + "Interval": "#890f02", + "Last Duration": "#f9934e", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 101 + }, + "hiddenSeries": false, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeatDirection": "h", + "scopedVars": { + "RuleGroup": { + "selected": false, + "text": "/etc/config/rules;CPU", + "value": "/etc/config/rules;CPU" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_rule_group_rules{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Rules", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$RuleGroup: Rules", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "default-kubecost", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 108 + }, + "id": 108, + "panels": [], + "repeat": null, + "repeatIteration": 1603144824023, + "repeatPanelId": 61, + "scopedVars": { + "RuleGroup": { + "selected": false, + "text": "/etc/config/rules;Savings", + "value": "/etc/config/rules;Savings" + } + }, + "title": "Rule Group: $RuleGroup", + "type": "row" + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Interval": "#890f02", + "Last Duration": "#f9934e", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 109 + }, + "hiddenSeries": false, + "id": 109, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "repeatDirection": "h", + "repeatIteration": 1603144824023, + "repeatPanelId": 43, + "repeatedByRow": true, + "scopedVars": { + "RuleGroup": { + "selected": false, 
+ "text": "/etc/config/rules;Savings", + "value": "/etc/config/rules;Savings" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_rule_group_interval_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Interval", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + }, + { + "expr": "prometheus_rule_group_last_duration_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Last Duration", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$RuleGroup: Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Chunks": "#1F78C1", + "Chunks to persist": "#508642", + "Interval": "#890f02", + "Last Duration": "#f9934e", + "Max chunks": "#052B51", + "Max to persist": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "default-kubecost", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 109 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.1", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeatDirection": "h", + "repeatIteration": 1603144824023, + "repeatPanelId": 66, + "repeatedByRow": true, + "scopedVars": { + "RuleGroup": { + "selected": false, + "text": "/etc/config/rules;Savings", + "value": "/etc/config/rules;Savings" + } + }, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_rule_group_rules{job=\"prometheus\",instance=\"$Prometheus:9090\",rule_group=~\"$RuleGroup\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Rules", + "metric": "prometheus_local_storage_memory_chunkdescs", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "$RuleGroup: Rules", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + 
"show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "localhost", + "value": "localhost" + }, + "datasource": "default-kubecost", + "definition": "", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "Prometheus", + "options": [], + "query": "query_result(up{job=\"prometheus\"} == 1)", + "refresh": 2, + "regex": ".*instance=\"([^\"]+):9090\".*", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": null, + "tags": [], + "tagsQuery": null, + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": "default-kubecost", + "definition": "", + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "RuleGroup", + "options": [], + "query": "prometheus_rule_group_last_duration_seconds{job=\"prometheus\",instance=\"$Prometheus:9090\"}", + "refresh": 2, + "regex": ".*rule_group=\"(.*?)\".*", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Prometheus Benchmark - 2.17.x", + "uid": "L0HBvojWz", + "version": 4 +} diff --git a/charts/kubecost/cost-analyzer/1.70.000/questions.yml b/charts/kubecost/cost-analyzer/1.70.000/questions.yml new file mode 100644 index 000000000..ce86c64fb --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/questions.yml @@ -0,0 +1,160 @@ +questions: +# General Settings +- variable: kubecostProductConfigs.clusterName + label: Cluster Name + description: "Used for display in the cost-analyzer UI (Can be renamed in the UI)" + type: string + required: true + default: "" + group: General Settings +- variable: persistentVolume.enabled + label: Enable Persistent Volume for CostAnalyzer + description: "If true, Kubecost will create a Persistent Volume Claim for product config data" + type: boolean + default: false + show_subquestion_if: true + group: "General Settings" + subquestions: + - variable: persistentVolume.size + label: CostAnalyzer Persistent Volume Size + type: string + default: "0.2Gi" + +# Prometheus Server +- variable: global.prometheus.enabled + label: Enable Prometheus + description: If false, use an existing Prometheus install + type: boolean + default: true + group: "Prometheus" +- variable: prometheus.kubeStateMetrics.enabled + label: Enable KubeStateMetrics + description: "If true, deploy kube-state-metrics for Kubernetes metrics" + type: boolean + default: true + show_if: "global.prometheus.enabled=true" + group: "Prometheus" +- variable: prometheus.server.retention + label: Prometheus Server Retention + description: "Determines when to remove old data" + type: string + default: "15d" + show_if: "global.prometheus.enabled=true" + group: "Prometheus" +- variable: prometheus.server.persistentVolume.enabled + label: Create Persistent Volume for Prometheus + description: "If true, prometheus will create a persistent volume claim" + type: boolean + required: true + default: false + group: "Prometheus" + 
show_if: "global.prometheus.enabled=true" + show_subquestion_if: true + subquestions: + - variable: prometheus.server.persistentVolume.size + label: Prometheus Persistent Volume Size + type: string + default: "8Gi" + - variable: prometheus.server.persistentVolume.storageClass + label: Prometheus Persistent Volume StorageClass + description: "Prometheus data persistent volume storageClass, if not set use default StorageClass" + default: "" + type: storageclass + - variable: prometheus.server.persistentVolume.existingClaim + label: Existing Persistent Volume Claim for Prometheus + description: "If not empty, uses the specified existing PVC instead of creating new one" + type: pvc + default: "" + +# Prometheus Node Exporter +- variable: prometheus.nodeExporter.enabled + label: Enable NodeExporter + description: "If false, do not create NodeExporter daemonset" + type: boolean + default: true + group: "NodeExporter" +- variable: prometheus.serviceAccounts.nodeExporter.create + label: Enable Service Accounts NodeExporter + description: "If false, do not create NodeExporter daemonset" + type: boolean + default: true + group: "NodeExporter" + +# Prometheus AlertManager +- variable: prometheus.alertmanager.enabled + label: Enable AlertManager + type: boolean + default: true + group: "AlertManager" +- variable: prometheus.alertmanager.persistentVolume.enabled + label: Create Persistent Volume for AlertManager + description: "If true, alertmanager will create a persistent volume claim" + type: boolean + required: true + default: false + group: "AlertManager" + show_if: "prometheus.alertmanager.enabled=true" + show_subquestion_if: true + subquestions: + - variable: prometheus.alertmanager.persistentVolume.size + default: "2Gi" + description: "AlertManager data persistent volume size" + type: string + label: AlertManager Persistent Volume Size + - variable: prometheus.alertmanager.persistentVolume.storageClass + default: "" + description: "Alertmanager data persistent volume storageClass, if not set use default StorageClass" + type: storageclass + label: AlertManager Persistent Volume StorageClass + - variable: prometheus.alertmanager.persistentVolume.existingClaim + default: "" + description: "If not empty, uses the specified existing PVC instead of creating new one" + type: pvc + label: Existing Persistent Volume Claim for AlertManager + +# PushGateway +- variable: prometheus.pushgateway.enabled + label: Enable PushGateway + type: boolean + default: true + group: "PushGateway" +- variable: prometheus.pushgateway.persistentVolume.enabled + label: Create Persistent Volume for PushGateway + description: "If true, PushGateway will create a persistent volume claim" + required: true + type: boolean + default: false + group: "PushGateway" + show_if: "prometheus.pushgateway.enabled=true" + show_subquestion_if: true + subquestions: + - variable: prometheus.prometheus.pushgateway.persistentVolume.size + label: PushGateway Persistent Volume Size + type: string + default: "2Gi" + - variable: prometheus.pushgateway.persistentVolume.storageClass + label: PushGateway Persistent Volume StorageClass + description: "PushGateway data persistent volume storageClass, if not set use default StorageClass" + type: storageclass + default: "" + - variable: prometheus.pushgateway.persistentVolume.existingClaim + label: Existing Persistent Volume Claim for PushGateway + description: "If not empty, uses the specified existing PVC instead of creating new one" + type: pvc + default: "" + +# Services and Load Balancing +- variable: 
ingress.enabled + label: Enable Ingress + description: "Expose app using Ingress (Layer 7 Load Balancer)" + default: true + type: boolean + show_subquestion_if: true + group: "Services and Load Balancing" + subquestions: + - variable: ingress.hosts[0] + default: "xip.io" + description: "Hostname to your CostAnalyzer installation" + type: hostname + required: true + label: Hostname diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/NOTES.txt b/charts/kubecost/cost-analyzer/1.70.000/templates/NOTES.txt new file mode 100644 index 000000000..bbc2594e9 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/NOTES.txt @@ -0,0 +1,10 @@ + + +-------------------------------------------------- +{{- $servicePort := .Values.service.port | default 9090 -}} +Kubecost has been successfully installed. When pods are Ready, you can enable port-forwarding with the following command: + + kubectl port-forward --namespace {{ .Release.Namespace }} deployment/{{ template "cost-analyzer.fullname" . }} {{ $servicePort }} + +Next, navigate to http://localhost:{{ $servicePort }} in a web browser. + diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/_helpers.tpl b/charts/kubecost/cost-analyzer/1.70.000/templates/_helpers.tpl new file mode 100644 index 000000000..95e2dca20 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/_helpers.tpl @@ -0,0 +1,200 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cost-analyzer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "cost-analyzer.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the fully qualified name for Prometheus server service. +*/}} +{{- define "cost-analyzer.prometheus.server.name" -}} +{{- if .Values.prometheus -}} +{{- if .Values.prometheus.server -}} +{{- if .Values.prometheus.server.fullnameOverride -}} +{{- .Values.prometheus.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-prometheus-server" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- else -}} +{{- printf "%s-prometheus-server" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- else -}} +{{- printf "%s-prometheus-server" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the fully qualified name for Prometheus alertmanager service. 
+*/}} +{{- define "cost-analyzer.prometheus.alertmanager.name" -}} +{{- if .Values.prometheus -}} +{{- if .Values.prometheus.alertmanager -}} +{{- if .Values.prometheus.alertmanager.fullnameOverride -}} +{{- .Values.prometheus.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-prometheus-alertmanager" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- else -}} +{{- printf "%s-prometheus-alertmanager" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- else -}} +{{- printf "%s-prometheus-alertmanager" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{- define "cost-analyzer.serviceName" -}} +{{- printf "%s-%s" .Release.Name "cost-analyzer" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Network Costs name used to tie autodiscovery of metrics to daemon set pods +*/}} +{{- define "cost-analyzer.networkCostsName" -}} +{{- printf "%s-%s" .Release.Name "network-costs" -}} +{{- end -}} + +{{- define "kubecost.clusterControllerName" -}} +{{- printf "%s-%s" .Release.Name "cluster-controller" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cost-analyzer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "cost-analyzer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cost-analyzer.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the common labels. +*/}} +{{- define "cost-analyzer.commonLabels" -}} +app.kubernetes.io/name: {{ include "cost-analyzer.name" . }} +helm.sh/chart: {{ include "cost-analyzer.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app: cost-analyzer +{{- end -}} + +{{/* +Create the selector labels. +*/}} +{{- define "cost-analyzer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "cost-analyzer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: cost-analyzer +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "cost-analyzer.daemonset.apiVersion" -}} +{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.9-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for priorityClass. +*/}} +{{- define "cost-analyzer.priorityClass.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "scheduling.k8s.io/v1beta1" -}} +{{- else if semverCompare "^1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "scheduling.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "cost-analyzer.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "cost-analyzer.podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.3-0, <1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "^1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Recursive filter which accepts a map containing an input map (.v) and an output map (.r). The template +will traverse all values inside .v recursively writing non-map values to the output .r. If a nested map +is discovered, we look for an 'enabled' key. If it doesn't exist, we continue traversing the +map. If it does exist, we omit the inner map traversal iff enabled is false. This filter writes the +enabled only version to the output .r +*/}} +{{- define "cost-analyzer.filter" -}} +{{- $v := .v }} +{{- $r := .r }} +{{- range $key, $value := .v }} + {{- $tp := kindOf $value -}} + {{- if eq $tp "map" -}} + {{- $isEnabled := true -}} + {{- if (hasKey $value "enabled") -}} + {{- $isEnabled = $value.enabled -}} + {{- end -}} + {{- if $isEnabled -}} + {{- $rr := "{}" | fromYaml }} + {{- template "cost-analyzer.filter" (dict "v" $value "r" $rr) }} + {{- $_ := set $r $key $rr -}} + {{- end -}} + {{- else -}} + {{- $_ := set $r $key $value -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +This template accepts a map and returns a base64 encoded json version of the map where all disabled +leaf nodes are omitted. + +The implied use case is {{ template "cost-analyzer.filterEnabled" .Values }} +*/}} +{{- define "cost-analyzer.filterEnabled" -}} +{{- $result := "{}" | fromYaml }} +{{- template "cost-analyzer.filter" (dict "v" . "r" $result) }} +{{- $result | toJson | b64enc }} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/aws-service-key-secret.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/aws-service-key-secret.yaml new file mode 100644 index 000000000..6b8de4d5e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/aws-service-key-secret.yaml @@ -0,0 +1,17 @@ +{{- if .Values.kubecostProductConfigs }} +{{- if .Values.kubecostProductConfigs.createServiceKeySecret }} +{{- if .Values.kubecostProductConfigs.awsServiceKeyName }} +apiVersion: v1 +kind: Secret +metadata: + name: cloud-service-key +type: Opaque +stringData: + service-key.json: |- + { + "aws_access_key_id": "{{ .Values.kubecostProductConfigs.awsServiceKeyName }}", + "aws_secret_access_key": "{{ .Values.kubecostProductConfigs.awsServiceKeyPassword }}" + } +{{- end -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-deployment-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-deployment-template.yaml new file mode 100644 index 000000000..35f5b9991 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-deployment-template.yaml @@ -0,0 +1,26 @@ +{{- if .Values.awsstore }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cost-analyzer.fullname" . }}-awsstore + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +spec: + selector: + matchLabels: + app: awsstore + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: awsstore + spec: + serviceAccountName: awsstore-serviceaccount + containers: + - image: {{ .Values.awsstore.imageNameAndVersion }} + name: awsstore +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-service-account-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-service-account-template.yaml new file mode 100644 index 000000000..65f37e652 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/awsstore-service-account-template.yaml @@ -0,0 +1,12 @@ +{{- if .Values.awsstore }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: awsstore-serviceaccount + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +{{- with .Values.awsstore.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/azure-service-key-secret.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/azure-service-key-secret.yaml new file mode 100644 index 000000000..bb71e064b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/azure-service-key-secret.yaml @@ -0,0 +1,21 @@ +{{- if .Values.kubecostProductConfigs }} +{{- if .Values.kubecostProductConfigs.createServiceKeySecret }} +{{- if .Values.kubecostProductConfigs.azureSubscriptionID }} +apiVersion: v1 +kind: Secret +metadata: + name: cloud-service-key +type: Opaque +stringData: + service-key.json: |- + { + "subscriptionId": "{{ .Values.kubecostProductConfigs.azureSubscriptionID }}", + "serviceKey": { + "appId": "{{ .Values.kubecostProductConfigs.azureClientID }}", + "password": "{{ .Values.kubecostProductConfigs.azureClientPassword }}", + "tenant": "{{ .Values.kubecostProductConfigs.azureTenantID }}" + } + } +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-alerts-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-alerts-configmap.yaml new file mode 100644 index 000000000..b627cf874 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-alerts-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.global.notifications.alertConfigs }} +{{- if .Values.global.notifications.alertConfigs.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: alert-configs + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + alerts.json: '{{ toJson .Values.global.notifications.alertConfigs }}' +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-checks-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-checks-template.yaml new file mode 100644 index 000000000..eef8d187d --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-checks-template.yaml @@ -0,0 +1,63 @@ +{{- if .Values.kubecostChecks -}} +{{- if .Values.kubecostChecks.enabled -}} +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: cost-analyzer-checks + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +spec: + {{- if .Values.kubecostChecks.debug }} + schedule: "*/1 * * * *" + {{- else if .Values.kubecostChecks.schedule }} + schedule: {{ .Values.kubecostChecks.schedule | quote }} + {{- else }} + schedule: "*/10 * * * *" + {{- end }} + jobTemplate: + metadata: + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 8 }} + spec: + template: + spec: + containers: + - name: cost-analyzer-checks + {{- if .Values.kubecostChecks }} + image: {{ .Values.kubecostChecks.image }}:prod-{{ $.Chart.AppVersion }} + {{- else }} + image: gcr.io/kubecost1/checks:prod-{{ $.Chart.AppVersion }} + {{ end }} + imagePullPolicy: Always + args: + - node + - ./node/cron.js + resources: +{{ toYaml .Values.kubecostChecks.resources | indent 14 }} + env: + - name: COST_ANALYZER_SERVER_ENDPOINT + value: {{ template "cost-analyzer.serviceName" . }}.{{ .Release.Namespace }}:9001 + - name: COST_ANALYZER_MODEL_ENDPOINT + {{- if .Values.saml.enabled }} + value: {{ template "cost-analyzer.serviceName" . }}.{{ .Release.Namespace }}:9004 + {{- else }} + value: {{ template "cost-analyzer.serviceName" . }}.{{ .Release.Namespace }}:9003 + {{ end }} + {{- if .Values.kubecostChecks }} + {{- if .Values.kubecostChecks.debug }} + - name: SEND_UPDATES_NOW + value: "true" + {{- end }} + {{- end }} + - name: PROMETHEUS_ALERTMANAGER_ENDPOINT + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . }} + key: prometheus-alertmanager-endpoint + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + restartPolicy: OnFailure +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluser-role-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluser-role-template.yaml new file mode 100644 index 000000000..16f926e8a --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluser-role-template.yaml @@ -0,0 +1,105 @@ +{{- if and .Values.reporting .Values.reporting.logCollection -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: {{ .Release.Namespace }} + name: {{ template "cost-analyzer.serviceAccountName" . }} +rules: +- apiGroups: + - '' + resources: + - "pods/log" + verbs: + - get + - list + - watch +--- +{{- end }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "cost-analyzer.serviceAccountName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +rules: + - apiGroups: + - '' + resources: + - configmaps + - deployments + - nodes + - pods + - events + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - namespaces + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - statefulsets + - deployments + - daemonsets + - replicasets + verbs: + - list + - watch + - apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - events.k8s.io + resources: + - events + verbs: + - get + - list + - watch diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluster-role-binding-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluster-role-binding-template.yaml new file mode 100644 index 000000000..f2952b923 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-cluster-role-binding-template.yaml @@ -0,0 +1,33 @@ +{{- if .Values.reporting }} +{{- if .Values.reporting.logCollection }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "cost-analyzer.serviceAccountName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cost-analyzer.serviceAccountName" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cost-analyzer.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +{{- end }} +{{- end }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "cost-analyzer.serviceAccountName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cost-analyzer.serviceAccountName" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cost-analyzer.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-config-map-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-config-map-template.yaml new file mode 100644 index 000000000..86aa0ee51 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-config-map-template.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cost-analyzer.fullname" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + {{- if .Values.global.prometheus.enabled }} + {{- if .Values.global.zone }} + prometheus-alertmanager-endpoint: http://{{ template "cost-analyzer.prometheus.alertmanager.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.zone }} + {{ else }} + prometheus-alertmanager-endpoint: http://{{ template "cost-analyzer.prometheus.alertmanager.name" . 
}}.{{ .Release.Namespace }} + {{- end -}} + {{ else }} + prometheus-alertmanager-endpoint: {{ .Values.global.notifications.alertmanager.fqdn }} + {{- end -}} + {{- if .Values.global.prometheus.enabled }} + {{- if .Values.global.zone }} + prometheus-server-endpoint: http://{{ template "cost-analyzer.prometheus.server.name" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.zone }} + {{ else }} + prometheus-server-endpoint: http://{{ template "cost-analyzer.prometheus.server.name" . }}.{{ .Release.Namespace }} + {{- end -}} + {{ else }} + prometheus-server-endpoint: {{ .Values.global.prometheus.fqdn }} + {{- end -}} + {{- if .Values.kubecostToken }} + kubecost-token: {{ .Values.kubecostToken }} + {{ else }} + kubecost-token: not-applied + {{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-db-pvc-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-db-pvc-template.yaml new file mode 100644 index 000000000..38b7e92f8 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-db-pvc-template.yaml @@ -0,0 +1,29 @@ +{{- if (.Values.kubecostModel.etlToDisk | default true) -}} +{{- if .Values.persistentVolume -}} +{{- if not .Values.persistentVolume.dbExistingClaim -}} +{{- if .Values.persistentVolume.enabled -}} +{{- if .Values.persistentVolume.dbPVEnabled -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "cost-analyzer.fullname" . }}-db + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + {{- if .Values.persistentVolume.dbStorageClass }} + storageClassName: {{ .Values.persistentVolume.dbStorageClass }} + {{ end }} + resources: + requests: + {{- if .Values.persistentVolume }} + storage: {{ .Values.persistentVolume.dbSize }} + {{- else }} + storage: 32.0Gi + {{ end }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-deployment-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-deployment-template.yaml new file mode 100644 index 000000000..2bfd2896b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-deployment-template.yaml @@ -0,0 +1,672 @@ +{{- $nginxPort := int .Values.service.port | default 9090 -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cost-analyzer.fullname" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +{{- if .Values.kubecostDeployment }} +{{- with .Values.kubecostDeployment.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} +spec: +{{- if .Values.kubecostDeployment }} + replicas: {{ .Values.kubecostDeployment.replicas | default 1 }} +{{- end }} + selector: + matchLabels: +{{ include "cost-analyzer.selectorLabels" . | nindent 8}} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + {{ include "cost-analyzer.selectorLabels" . | nindent 8 }} +{{- if .Values.kubecostDeployment }} +{{- with .Values.kubecostDeployment.labels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.global.podAnnotations}} + annotations: +{{- with .Values.global.podAnnotations }} +{{ toYaml . 
| indent 8 }} +{{- end }} +{{- end }} + spec: + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + securityContext: + runAsUser: 0 + {{- else }} + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + {{- end }} + {{- else if lt $nginxPort 1025 }} + securityContext: + runAsUser: 0 + {{- else }} + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + {{- end }} + restartPolicy: Always + serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} + volumes: + - name: nginx-conf + configMap: + name: nginx-conf + items: + - key: nginx.conf + path: default.conf + {{- if .Values.kubecostProductConfigs }} + {{- if .Values.kubecostProductConfigs.productKey }} + {{- if .Values.kubecostProductConfigs.productKey.secretname }} + - name: productkey-secret + secret: + secretName: {{ .Values.kubecostProductConfigs.productKey.secretname }} + items: + - key: productkey.json + path: productkey.json + {{- end }} + {{- end -}} + {{- if .Values.kubecostProductConfigs.gcpSecretName }} + - name: gcp-key-secret + secret: + secretName: {{ .Values.kubecostProductConfigs.gcpSecretName }} + items: + - key: compute-viewer-kubecost-key.json + path: key.json + {{- end }} + {{- if .Values.kubecostProductConfigs.serviceKeySecretName }} + - name: service-key-secret + secret: + secretName: {{ .Values.kubecostProductConfigs.serviceKeySecretName }} + {{- else if .Values.kubecostProductConfigs.createServiceKeySecret }} + - name: service-key-secret + secret: + secretName: cloud-service-key + {{- end }} + {{- if .Values.kubecostProductConfigs.clusters }} + - name: kubecost-clusters + configMap: + name: kubecost-clusters + {{- range .Values.kubecostProductConfigs.clusters }} + {{- if .auth }} + {{- if .auth.secretName }} + - name: {{ .auth.secretName }} + secret: + secretName: {{ .auth.secretName }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + - name: tls + secret: + secretName : {{ .Values.kubecostFrontend.tls.secretName }} + items: + - key: tls.crt + path: kc.crt + - key: tls.key + path: kc.key + {{- end }} + {{- end }} + {{- if .Values.saml }} + {{- if .Values.saml.enabled }} + {{- if .Values.saml.secretName }} + - name: secret-volume + secret: + secretName: {{ .Values.saml.secretName }} + {{- end }} + {{- if .Values.saml.metadataSecretName }} + - name: metadata-secret-volume + secret: + secretName: {{ .Values.saml.metadataSecretName }} + {{- end }} + {{- if .Values.saml.rbac.enabled }} + - name: saml-roles + configMap: + name: {{ template "cost-analyzer.fullname" . }}-saml + {{- end }} + {{- end }} + {{- end }} + - name: persistent-configs +{{- if .Values.persistentVolume }} +{{- if .Values.persistentVolume.enabled }} + persistentVolumeClaim: +{{- if .Values.persistentVolume.existingClaim }} + claimName: {{ .Values.persistentVolume.existingClaim }} +{{- else }} + claimName: {{ template "cost-analyzer.fullname" . }} +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- else }} + persistentVolumeClaim: + claimName: {{ template "cost-analyzer.fullname" . 
}} +{{- end }} +{{- if and (.Values.kubecostModel.etlToDisk | default true) .Values.persistentVolume.dbPVEnabled }} + - name: persistent-db +{{- if .Values.persistentVolume }} +{{- if .Values.persistentVolume.enabled }} + persistentVolumeClaim: +{{- if .Values.persistentVolume.dbExistingClaim }} + claimName: {{ .Values.persistentVolume.dbExistingClaim }} +{{- else }} + claimName: {{ template "cost-analyzer.fullname" . }}-db +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- else }} + persistentVolumeClaim: + claimName: {{ template "cost-analyzer.fullname" . }}-db +{{- end }} +{{- end }} + initContainers: +{{- if .Values.supportNFS }} + - name: config-db-perms-fix + {{- if .Values.initChownDataImage }} + image: {{ .Values.initChownDataImage }} + {{- else }} + image: busybox + {{- end }} + resources: +{{ toYaml .Values.initChownData.resources | indent 12 }} + {{- if and (.Values.kubecostModel.etlToDisk | default true) .Values.persistentVolume.dbPVEnabled }} + command: ["sh", "-c", "/bin/chmod -R 777 /var/configs && /bin/chmod -R 777 /var/db"] + {{- else }} + command: ["sh", "-c", "/bin/chmod -R 777 /var/configs"] + {{- end}} + volumeMounts: + - name: persistent-configs + mountPath: /var/configs + {{- if and (.Values.kubecostModel.etlToDisk | default true) .Values.persistentVolume.dbPVEnabled }} + - name: persistent-db + mountPath: /var/db + {{- end }} + securityContext: + runAsUser: 0 +{{ end }} + containers: + {{- if .Values.kubecostModel }} + {{- if .Values.imageVersion}} + - image: {{ .Values.kubecostModel.image }}:{{ .Values.imageVersion }} + {{- else }} + - image: {{ .Values.kubecostModel.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + - image: gcr.io/kubecost1/cost-model:prod-{{ $.Chart.AppVersion }} + {{ end }} + name: cost-model + {{- if .Values.kubecostModel.imagePullPolicy }} + imagePullPolicy: {{ .Values.kubecostModel.imagePullPolicy }} + {{- else }} + imagePullPolicy: Always + {{- end }} + resources: +{{ toYaml .Values.kubecostModel.resources | indent 12 }} + readinessProbe: + httpGet: + path: /healthz + port: 9003 + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 200 + volumeMounts: + - name: persistent-configs + mountPath: /var/configs + {{- if and (.Values.kubecostModel.etlToDisk | default true) .Values.persistentVolume.dbPVEnabled }} + - name: persistent-db + mountPath: /var/db + {{- end }} + {{- if .Values.kubecostProductConfigs }} + {{- if .Values.kubecostProductConfigs.productKey }} + {{- if .Values.kubecostProductConfigs.productKey.secretname }} + - name: productkey-secret + mountPath: /var/configs/productkey + {{- end }} + {{- end }} + {{- if .Values.kubecostProductConfigs.gcpSecretName }} + - name: gcp-key-secret + mountPath: /models + {{- end }} + {{- if or .Values.kubecostProductConfigs.serviceKeySecretName .Values.kubecostProductConfigs.createServiceKeySecret }} + - name: service-key-secret + mountPath: /var/secrets + {{- end }} + {{- if .Values.kubecostProductConfigs.clusters }} + - name: kubecost-clusters + mountPath: /var/configs/clusters + {{- range .Values.kubecostProductConfigs.clusters }} + {{- if .auth }} + {{- if .auth.secretName }} + - name: {{ .auth.secretName }} + mountPath: /var/secrets/{{ .auth.secretName }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.saml }} + {{- if .Values.saml.enabled }} + {{- if .Values.saml.secretName }} + - name: secret-volume + mountPath: /var/configs/secret-volume + {{- end }} + {{- if .Values.saml.metadataSecretName }} + - name: 
metadata-secret-volume + mountPath: /var/configs/metadata-secret-volume + {{- end }} + {{- if .Values.saml.rbac.enabled }} + - name: saml-roles + mountPath: /var/configs/saml + {{- end }} + {{- end }} + {{- end }} + env: + - name: PROMETHEUS_SERVER_ENDPOINT + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . }} + key: prometheus-server-endpoint + - name: CLOUD_PROVIDER_API_KEY + value: "AIzaSyDXQPG_MHUEy9neR7stolq6l0ujXmjJlvk" # The GCP Pricing API requires a key. + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/configs/key.json + - name: CONFIG_PATH + value: /var/configs/ + - name: DB_PATH + value: /var/db/ + - name: CLUSTER_PROFILE + {{- if .Values.kubecostProductConfigs }} + value: {{ .Values.kubecostProductConfigs.clusterProfile | default "production" }} + {{- else }} + value: production + {{- end }} + - name: REMOTE_WRITE_PASSWORD + value: {{ .Values.remoteWrite.postgres.auth.password }} + {{- if .Values.remoteWrite.postgres.enabled }} + - name: REMOTE_WRITE_ENABLED + value: "true" + {{- end }} + - name: GOGC + value: "60" + {{- if .Values.global.thanos.queryServiceBasicAuthSecretName}} + - name: MC_BASIC_AUTH_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBasicAuthSecretName }} + key: USERNAME + - name: MC_BASIC_AUTH_PW + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBasicAuthSecretName }} + key: PASSWORD + {{- end }} + {{- if .Values.global.prometheus.queryServiceBasicAuthSecretName}} + - name: DB_BASIC_AUTH_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBasicAuthSecretName }} + key: USERNAME + - name: DB_BASIC_AUTH_PW + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBasicAuthSecretName }} + key: PASSWORD + {{- end }} + {{- if .Values.global.prometheus.queryServiceBearerTokenSecretName }} + - name: DB_BEARER_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBearerTokenSecretName }} + key: TOKEN + {{- end }} + {{- if .Values.global.thanos.queryServiceBearerTokenSecretName }} + - name: MC_BEARER_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBearerTokenSecretName }} + key: TOKEN + {{- end }} + {{- if .Values.global.prometheus.insecureSkipVerify }} + - name: INSECURE_SKIP_VERIFY + value: {{ (quote .Values.global.prometheus.insecureSkipVerify) }} + {{- end }} + {{- if .Values.pricingCsv }} + {{- if .Values.pricingCsv.enabled }} + - name: USE_CSV_PROVIDER + value: "true" + - name: CSV_PATH + value: {{ .Values.pricingCsv.location.URI }} + - name: CSV_REGION + value: {{ .Values.pricingCsv.location.region }} + {{- if eq .Values.pricingCsv.location.provider "AWS"}} + {{- if .Values.pricingCsv.location.csvAccessCredentials }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Values.pricingCsv.location.csvAccessCredentials }} + key: AWS_ACCESS_KEY_ID + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.pricingCsv.location.csvAccessCredentials }} + key: AWS_SECRET_ACCESS_KEY + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.reporting }} + - name: LOG_COLLECTION_ENABLED + value: {{ (quote .Values.reporting.logCollection ) | default (quote true) }} + - name: PRODUCT_ANALYTICS_ENABLED + value: {{ (quote .Values.reporting.productAnalytics ) | default (quote true) }} + - name: ERROR_REPORTING_ENABLED + value: {{ (quote .Values.reporting.errorReporting ) | default (quote true) }} + - name: 
VALUES_REPORTING_ENABLED + value: {{ (quote .Values.reporting.valuesReporting) | default (quote true) }} + {{- if .Values.reporting.errorReporting }} + - name: SENTRY_DSN + value: "https://71964476292e4087af8d5072afe43abd@o394722.ingest.sentry.io/5245431" + {{- end }} + {{- end }} + - name: CACHE_WARMING_ENABLED + value: {{ (quote .Values.kubecostModel.warmCache) | default (quote true) }} + - name: SAVINGS_CACHE_WARMING_ENABLED + value: {{ (quote .Values.kubecostModel.warmSavingsCache) | default (quote true) }} + - name: ETL_ENABLED + value: {{ (quote .Values.kubecostModel.etl) | default (quote true) }} + - name: ETL_TO_DISK_ENABLED + value: {{ (quote .Values.kubecostModel.etlToDisk) | default (quote true) }} + - name : ETL_CLOUD_ASSETS_ENABLED + value: {{ (quote .Values.kubecostModel.etlCloudAssets) | default (quote true) }} + {{- if .Values.persistentVolume.dbPVEnabled }} + - name: ETL_PATH_PREFIX + value: "/var/db" + {{- end }} + {{- if .Values.kubecostModel.etlStoreDurationDays }} + - name: ETL_STORE_DURATION_DAYS + value: {{ (quote .Values.kubecostModel.etlStoreDurationDays) | default (quote 120) }} + {{- end }} + - name: PV_ENABLED + value: {{ (quote .Values.persistentVolume.enabled) | default (quote true) }} + - name: MAX_QUERY_CONCURRENCY + value: {{ (quote .Values.kubecostModel.maxQueryConcurrency) | default (quote 5) }} + - name: UTC_OFFSET + value: {{ (quote .Values.kubecostModel.utcOffset) | default (quote ) }} + {{- if .Values.networkCosts }} + {{- if .Values.networkCosts.enabled }} + - name: NETWORK_COSTS_PORT + value: {{ quote .Values.networkCosts.port | default (quote 3001) }} + {{- end }} + {{- end }} + {{- /* + If queryService is set, the cost-analyzer will always pass THANOS_ENABLED as true + to ensure that the custom query service target is used. The global.thanos.enabled + flag does not have any effect on this behavior.
+ */}} + {{- if .Values.global.thanos.queryService }} + - name: THANOS_ENABLED + value: "true" + - name: THANOS_QUERY_URL + value: {{ .Values.global.thanos.queryService }} + - name: THANOS_QUERY_OFFSET + value: {{ .Values.global.thanos.queryOffset | default "3h" }} + - name: THANOS_MAX_SOURCE_RESOLUTION + value: {{ .Values.kubecostModel.maxSourceResolution | default "raw" }} + {{- else if and .Values.global.thanos.enabled .Values.thanos }} + {{- if .Values.thanos.query }} + {{- if .Values.thanos.query.enabled }} + - name: THANOS_ENABLED + value: "true" + - name: THANOS_QUERY_URL + value: http://{{ .Release.Name }}-thanos-query-frontend-http.{{ .Release.Namespace }}:{{ .Values.thanos.queryFrontend.http.port }} + - name: THANOS_QUERY_OFFSET + value: {{ .Values.global.thanos.queryOffset | default "3h" }} + - name: THANOS_MAX_SOURCE_RESOLUTION + value: {{ .Values.kubecostModel.maxSourceResolution | default "raw" }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.saml }} + {{- if .Values.saml.enabled }} + - name: SAML_ENABLED + value: "true" + - name: IDP_URL + value: {{ .Values.saml.idpMetadataURL }} + - name: SP_HOST + value: {{ .Values.saml.appRootURL }} + {{- if .Values.saml.audienceURI }} + - name: AUDIENCE_URI + value: {{ .Values.saml.audienceURI }} + {{- end }} + {{- if .Values.saml.rbac.enabled }} + - name: SAML_RBAC_ENABLED + value: "true" + {{- end }} + {{- end }} + {{- end }} + {{- if and (.Values.prometheus.server.global.external_labels.cluster_id) (not .Values.prometheus.server.clusterIDConfigmap) }} + - name: CLUSTER_ID + value: {{ .Values.prometheus.server.global.external_labels.cluster_id }} + {{- end }} + {{- if .Values.prometheus.server.clusterIDConfigmap }} + - name: CLUSTER_ID + valueFrom: + configMapKeyRef: + name: {{ .Values.prometheus.server.clusterIDConfigmap }} + key: CLUSTER_ID + {{- end }} + {{- if .Values.remoteWrite.postgres.installLocal }} + - name: SQL_ADDRESS + value: pgprometheus + {{- else }} + - name: SQL_ADDRESS + value: {{ .Values.remoteWrite.postgres.remotePostgresAddress }} + {{- end }} + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: KUBECOST_NAMESPACE + value: {{ .Release.Namespace }} + - name: KUBECOST_TOKEN + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . 
}} + key: kubecost-token + {{- if .Values.kubecostFrontend }} + {{- if .Values.imageVersion}} + - image: {{ .Values.kubecostFrontend.image }}:{{ .Values.imageVersion }} + {{- else }} + - image: {{ .Values.kubecostFrontend.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + - image: gcr.io/kubecost1/frontend:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + command: ["nginx", "-g", "daemon off;"] + ports: + - containerPort: 443 + {{- end }} + {{- end }} + env: + - name: GET_HOSTS_FROM + value: dns + name: cost-analyzer-frontend + volumeMounts: + - name: nginx-conf + mountPath: /etc/nginx/conf.d/ + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + - name: tls + mountPath: /etc/ssl/certs + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.kubecostFrontend.resources | indent 12 }} + {{- if .Values.kubecostFrontend.imagePullPolicy }} + imagePullPolicy: {{ .Values.kubecostFrontend.imagePullPolicy }} + {{- else }} + imagePullPolicy: Always + {{- end }} + readinessProbe: + httpGet: + path: /healthz + port: 9003 + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 200 + {{- if .Values.kubecost }} + {{- if .Values.imageVersion}} + - image: {{ .Values.kubecost.image }}:{{ .Values.imageVersion }} + {{- else }} + - image: {{ .Values.kubecost.image }}:prod-{{ $.Chart.AppVersion }} + {{ end }} + {{- else }} + - image: gcr.io/kubecost1/server:prod-{{ $.Chart.AppVersion }} + {{ end }} + resources: +{{ toYaml .Values.kubecost.resources | indent 12 }} + name: cost-analyzer-server + readinessProbe: + httpGet: + path: /healthz + port: 9003 + initialDelaySeconds: 30 + periodSeconds: 10 + failureThreshold: 200 + volumeMounts: + - name: persistent-configs + mountPath: /var/configs + env: + - name: PROMETHEUS_SERVER_ENDPOINT + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . }} + key: prometheus-server-endpoint + {{- if .Values.reporting }} + {{- if .Values.reporting.valuesReporting }} + - name: HELM_VALUES + value: {{ template "cost-analyzer.filterEnabled" .Values }} + {{- end }} + {{- end }} + {{- if .Values.global.prometheus.insecureSkipVerify }} + - name: INSECURE_SKIP_VERIFY + value: {{ (quote .Values.global.prometheus.insecureSkipVerify) }} + {{- end }} + {{- /* + If queryService is set, the cost-analyzer will always pass THANOS_ENABLED as true + to ensure that the custom query service target is used. The global.thanos.enabled + flag does not have any effect on this behavior.
+ */}} + {{- if .Values.global.thanos.queryService }} + - name: THANOS_ENABLED + value: "true" + - name: THANOS_QUERY_URL + value: {{ .Values.global.thanos.queryService }} + - name: THANOS_QUERY_OFFSET + value: {{ .Values.global.thanos.queryOffset | default "3h" }} + - name: THANOS_MAX_SOURCE_RESOLUTION + value: {{ .Values.kubecostModel.maxSourceResolution | default "raw" }} + {{- else if and .Values.global.thanos.enabled .Values.thanos }} + {{- if .Values.thanos.query }} + {{- if .Values.thanos.query.enabled }} + - name: THANOS_ENABLED + value: "true" + - name: THANOS_QUERY_URL + value: http://{{ .Release.Name }}-thanos-query-frontend-http.{{ .Release.Namespace }}:{{ .Values.thanos.queryFrontend.http.port }} + - name: THANOS_QUERY_OFFSET + value: {{ .Values.global.thanos.queryOffset | default "3h" }} + - name: THANOS_MAX_SOURCE_RESOLUTION + value: {{ .Values.kubecostModel.maxSourceResolution | default "raw" }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.global.thanos.queryServiceBasicAuthSecretName}} + - name: MC_BASIC_AUTH_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBasicAuthSecretName }} + key: USERNAME + - name: MC_BASIC_AUTH_PW + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBasicAuthSecretName }} + key: PASSWORD + {{- end }} + {{- if .Values.global.prometheus.queryServiceBasicAuthSecretName}} + - name: DB_BASIC_AUTH_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBasicAuthSecretName }} + key: USERNAME + - name: DB_BASIC_AUTH_PW + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBasicAuthSecretName }} + key: PASSWORD + {{- end }} + {{- if .Values.global.prometheus.queryServiceBearerTokenSecretName }} + - name: DB_BEARER_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.prometheus.queryServiceBearerTokenSecretName }} + key: TOKEN + {{- end }} + {{- if .Values.global.thanos.queryServiceBearerTokenSecretName }} + - name: MC_BEARER_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.global.thanos.queryServiceBearerTokenSecretName }} + key: TOKEN + {{- end }} + - name: KUBECOST_NAMESPACE + value: {{ .Release.Namespace }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/configs/key.json + - name: KUBECOST_TOKEN + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . }} + key: kubecost-token + {{- if eq .Values.global.grafana.proxy false }} + - name: GRAFANA_URL + valueFrom: + configMapKeyRef: + name: external-grafana-config-map + key: grafanaURL + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 2 }} + {{- end }} + {{- if .Values.priority }} + {{- if .Values.priority.enabled }} + priorityClassName: {{ template "cost-analyzer.fullname" . }}-priority + {{- end }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-frontend-config-map-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-frontend-config-map-template.yaml new file mode 100644 index 000000000..429ed1f90 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-frontend-config-map-template.yaml @@ -0,0 +1,174 @@ +{{- $serviceName := include "cost-analyzer.serviceName" . -}} +{{- $nginxPort := .Values.service.targetPort | default 9090 -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + nginx.conf: | + gzip_static on; + + # Enable gzip encoding for content of the provided types of 50kb and higher. + gzip on; + gzip_min_length 50000; + gzip_proxied expired no-cache no-store private auth; + gzip_types + text/plain + text/css + text/xml + text/javascript + application/x-javascript + application/xml + application/json; + + upstream api { + server {{ $serviceName }}.{{ .Release.Namespace }}:9001; + } + + upstream model { + server {{ $serviceName }}.{{ .Release.Namespace }}:9003; + } + +{{- if .Values.clusterController }} +{{- if .Values.clusterController.enabled }} + upstream clustercontroller { + server {{ template "kubecost.clusterControllerName" . }}-service.{{ .Release.Namespace }}:9731; + } +{{- end }} +{{- end }} + +{{- if .Values.global.grafana.proxy }} + upstream grafana { +{{- if .Values.global.grafana.enabled }} + server {{ .Release.Name }}-grafana.{{ .Release.Namespace }}; +{{ else }} + server {{.Values.global.grafana.domainName}}; +{{ end }} + } +{{ end }} + + server { + server_name _; + root /var/www; + index index.html; + add_header Cache-Control "max-age=300"; + add_header Cache-Control "must-revalidate"; +{{- if .Values.imageVersion }} + add_header ETag "{{ $.Values.imageVersion }}"; +{{- else }} + add_header ETag "{{ $.Chart.Version }}"; +{{- end }} +{{- if .Values.kubecostFrontend.tls }} +{{- if .Values.kubecostFrontend.tls.enabled }} + ssl_certificate /etc/ssl/certs/kc.crt; + ssl_certificate_key /etc/ssl/certs/kc.key; + listen 443 ssl; +{{- else }} + listen {{ $nginxPort }}; +{{- end }} +{{- else }} + listen {{ $nginxPort }}; +{{- end }} + location /api/ { + {{- if .Values.saml.enabled }} + auth_request /auth; + {{- end }} + proxy_pass http://api/; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location /model/ { + proxy_connect_timeout 180; + proxy_send_timeout 180; + proxy_read_timeout 180; + proxy_pass http://model/; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location ~ ^/(turndown|cluster)/ { + + add_header 'Access-Control-Allow-Origin' '*' always; +{{- if .Values.clusterController }} +{{- if .Values.clusterController.enabled }} + {{- if .Values.saml }} + {{- if .Values.saml.enabled }} + auth_request /auth; + {{- else if .Values.saml.rbac.enabled}} + auth_request /authrbac; + {{- end }} + {{- end }} + + rewrite ^/(?:turndown|cluster)/(.*)$ /$1 break; + proxy_pass http://clustercontroller; + proxy_connect_timeout 180; + proxy_send_timeout 180; + proxy_read_timeout 180; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP 
$remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + +{{- else }} + return 404; +{{- end }} +{{- else }} + return 404; +{{- end }} + } + location /saml/ { + proxy_connect_timeout 180; + proxy_send_timeout 180; + proxy_read_timeout 180; + proxy_pass http://model/saml/; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + location /login { + proxy_connect_timeout 180; + proxy_send_timeout 180; + proxy_read_timeout 180; + proxy_pass http://model/login; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + {{- if .Values.global.grafana.proxy }} + location /grafana/ { + {{- if .Values.saml.enabled }} + auth_request /auth; + {{- end }} + proxy_pass http://grafana/; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + {{ end }} + {{- if .Values.saml.enabled }} + location /auth { + proxy_pass http://model/isAuthenticated; + } + {{- end }} + {{- if .Values.saml.rbac.enabled }} + location /authrbac { + proxy_pass http://model/isAdminAuthenticated; + } + {{- end }} + } diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-ingress-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-ingress-template.yaml new file mode 100644 index 000000000..1b6e8776a --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-ingress-template.yaml @@ -0,0 +1,40 @@ +{{- if .Values.ingress -}} +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "cost-analyzer.fullname" . -}} +{{- $serviceName := include "cost-analyzer.serviceName" . -}} +{{- $ingressPaths := .Values.ingress.paths -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + {{- range $ingressPaths }} + - path: {{ . }} + backend: + serviceName: {{ $serviceName }} + servicePort: frontend + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-config-map-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-config-map-template.yaml new file mode 100644 index 000000000..37276f297 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-config-map-template.yaml @@ -0,0 +1,15 @@ +{{- if .Values.networkCosts -}} +{{- if .Values.networkCosts.enabled -}} +{{- if .Values.networkCosts.config -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: network-costs-config + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +data: + config.yaml: | +{{- toYaml .Values.networkCosts.config | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-podmonitor-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-podmonitor-template.yaml new file mode 100644 index 000000000..d45567616 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-podmonitor-template.yaml @@ -0,0 +1,31 @@ +{{- if .Values.networkCosts }} +{{- if .Values.networkCosts.enabled }} +{{- if .Values.networkCosts.podMonitor }} +{{- if .Values.networkCosts.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "cost-analyzer.networkCostsName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if .Values.networkCosts.podMonitor.additionalLabels }} + {{ toYaml .Values.networkCosts.podMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - port: http-server + honorLabels: true + interval: 1m + scrapeTimeout: 10s + path: /metrics + scheme: http + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "cost-analyzer.networkCostsName" . }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-template.yaml new file mode 100644 index 000000000..e87d8d44b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-costs-template.yaml @@ -0,0 +1,93 @@ +{{- if .Values.networkCosts -}} +{{- if .Values.networkCosts.enabled -}} +apiVersion: {{ include "cost-analyzer.daemonset.apiVersion" . }} +kind: DaemonSet +metadata: + name: {{ template "cost-analyzer.networkCostsName" . }} + labels: + {{- include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "cost-analyzer.networkCostsName" . }} + template: + metadata: + labels: + app: {{ template "cost-analyzer.networkCostsName" . }} + spec: + hostNetwork: true + serviceAccountName: {{ template "cost-analyzer.serviceAccountName" . }} + containers: + - name: {{ template "cost-analyzer.networkCostsName" . 
}} + image: {{ .Values.networkCosts.image }} +{{- if .Values.networkCosts.imagePullPolicy }} + imagePullPolicy: {{ .Values.networkCosts.imagePullPolicy }} +{{- else }} + imagePullPolicy: Always +{{- end }} +{{- if .Values.networkCosts.resources }} + resources: +{{ toYaml .Values.networkCosts.resources | indent 10 }} +{{- end }} + env: + {{- if .Values.networkCosts.hostProc }} + - name: HOST_PROC + value: {{ .Values.networkCosts.hostProc.mountPath }} + {{- end }} + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: HOST_PORT + value: {{ (quote .Values.networkCosts.port) | default (quote 3001) }} + - name: TRAFFIC_LOGGING_ENABLED + value: {{ (quote .Values.networkCosts.trafficLogging) | default (quote true) }} + volumeMounts: + {{- if .Values.networkCosts.hostProc }} + - mountPath: {{ .Values.networkCosts.hostProc.mountPath }} + name: host-proc + {{- else }} + - mountPath: /net + name: nf-conntrack + - mountPath: /netfilter + name: netfilter + {{- end }} + {{- if .Values.networkCosts.config }} + - mountPath: /network-costs/config + name: network-costs-config + {{- end }} + securityContext: + privileged: true + ports: + - name: http-server + containerPort: {{ .Values.networkCosts.port | default 3001 }} + hostPort: {{ .Values.networkCosts.port | default 3001 }} +{{- if .Values.networkCosts.priorityClassName }} + priorityClassName: "{{ .Values.networkCosts.priorityClassName }}" +{{- end }} +{{- if .Values.networkCosts.tolerations }} + tolerations: +{{ toYaml .Values.networkCosts.tolerations | indent 8 }} + {{- end }} + volumes: + {{- if .Values.networkCosts.config }} + - name: network-costs-config + configMap: + name: network-costs-config + {{- end }} + {{- if .Values.networkCosts.hostProc }} + - name: host-proc + hostPath: + path: {{ default "/proc" .Values.networkCosts.hostProc.hostPath }} + {{- else }} + - name: nf-conntrack + hostPath: + path: /proc/net + - name: netfilter + hostPath: + path: /proc/sys/net/netfilter + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-policy.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-policy.yaml new file mode 100644 index 000000000..872951bd1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-network-policy.yaml @@ -0,0 +1,19 @@ +{{- if .Values.networkPolicy -}} +{{- if .Values.networkPolicy.enabled -}} +apiVersion: {{ include "cost-analyzer.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: deny-egress + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{ include "cost-analyzer.selectorLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: {} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pkey-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pkey-configmap.yaml new file mode 100644 index 000000000..5ce22f52b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pkey-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.kubecostProductConfigs }} +{{- if .Values.kubecostProductConfigs.productKey }} +{{- if .Values.kubecostProductConfigs.productKey.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: product-configs + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +data: + {{- if .Values.kubecostProductConfigs.productKey.key }} + key: {{ .Values.kubecostProductConfigs.productKey.key | quote }} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-deployment.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-deployment.yaml new file mode 100644 index 000000000..08f82a5ac --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-deployment.yaml @@ -0,0 +1,49 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +{{- if .Values.remoteWrite.postgres.installLocal -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cost-analyzer.fullname" . }}-postgres + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: postgres + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: postgres + spec: + containers: + - image: timescale/pg_prometheus:latest-pg11 + name: pgprometheus + ports: + - containerPort: 5432 + args: + - -csynchronous_commit=off + - -S 1GB + env: + - name: POSTGRES_PASSWORD + value: {{ .Values.remoteWrite.postgres.auth.password }} + - name: POSTGRES_USER + value: postgres + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgres-pv-volume + subPath: postgres + volumes: + - name: postgres-pv-volume + persistentVolumeClaim: + claimName: postgres-pv-claim +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pv.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pv.yaml new file mode 100644 index 000000000..0a67fa928 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pv.yaml @@ -0,0 +1,22 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +{{- if .Values.remoteWrite.postgres.installLocal -}} +kind: PersistentVolume +apiVersion: v1 +metadata: + name: postgres-pv-volume + labels: + type: local + app: postgres +spec: + capacity: + storage: {{ .Values.remoteWrite.postgres.persistentVolume.size }} + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pvc.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pvc.yaml new file mode 100644 index 000000000..8c5deb444 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-pvc.yaml @@ -0,0 +1,20 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +{{- if .Values.remoteWrite.postgres.installLocal -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: postgres-pv-claim + labels: + app: postgres +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.remoteWrite.postgres.persistentVolume.size }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-service.yaml new file mode 
100644 index 000000000..5488c58b1 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-postgres-service.yaml @@ -0,0 +1,22 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +{{- if .Values.remoteWrite.postgres.installLocal -}} +kind: Service +apiVersion: v1 +metadata: + name: pgprometheus + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + selector: + app: postgres + type: ClusterIP + ports: + - name: server + port: 5432 + targetPort: 5432 +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pricing-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pricing-configmap.yaml new file mode 100644 index 000000000..7163cec98 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pricing-configmap.yaml @@ -0,0 +1,121 @@ +{{- if .Values.kubecostProductConfigs }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: pricing-configs + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + {{- if .Values.kubecostProductConfigs.defaultModelPricing }} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.enabled }} + {{- if .Values.kubecostProductConfigs.customPricesEnabled }} + customPricesEnabled: "{{ .Values.kubecostProductConfigs.customPricesEnabled }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.CPU }} + CPU: "{{ .Values.kubecostProductConfigs.defaultModelPricing.CPU | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.spotCPU }} + spotCPU: "{{ .Values.kubecostProductConfigs.defaultModelPricing.spotCPU | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.RAM }} + RAM: "{{ .Values.kubecostProductConfigs.defaultModelPricing.RAM | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.spotRAM }} + spotRAM: "{{ .Values.kubecostProductConfigs.defaultModelPricing.spotRAM | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.GPU }} + GPU: "{{ .Values.kubecostProductConfigs.defaultModelPricing.GPU | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.spotGPU }} + spotGPU: "{{ .Values.kubecostProductConfigs.defaultModelPricing.spotGPU | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.storage }} + storage: "{{ .Values.kubecostProductConfigs.defaultModelPricing.storage | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.zoneNetworkEgress }} + zoneNetworkEgress: "{{ .Values.kubecostProductConfigs.defaultModelPricing.zoneNetworkEgress | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.regionNetworkEgress }} + regionNetworkEgress: "{{ .Values.kubecostProductConfigs.defaultModelPricing.regionNetworkEgress | toString }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultModelPricing.internetNetworkEgress }} + internetNetworkEgress: "{{ .Values.kubecostProductConfigs.defaultModelPricing.internetNetworkEgress | toString }}" + {{- end -}} + {{- end -}} + {{- end -}} + {{- if .Values.kubecostProductConfigs.clusterName }} + clusterName: "{{ .Values.kubecostProductConfigs.clusterName }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.currencyCode }} + currencyCode: "{{ 
.Values.kubecostProductConfigs.currencyCode }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.azureBillingRegion }} + azureBillingRegion: "{{ .Values.kubecostProductConfigs.azureBillingRegion }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.azureSubscriptionID }} + azureSubscriptionID: "{{ .Values.kubecostProductConfigs.azureSubscriptionID }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.azureClientID }} + azureClientID: "{{ .Values.kubecostProductConfigs.azureClientID }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.azureTenantID }} + azureTenantID: "{{ .Values.kubecostProductConfigs.azureTenantID }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.discount }} + discount: "{{ .Values.kubecostProductConfigs.discount }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.negotiatedDiscount }} + negotiatedDiscount: "{{ .Values.kubecostProductConfigs.negotiatedDiscount }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.defaultIdle }} + defaultIdle: "{{ .Values.kubecostProductConfigs.defaultIdle }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.sharedNamespaces }} + sharedNamespaces: "{{ .Values.kubecostProductConfigs.sharedNamespaces }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.spotLabel }} + spotLabel: "{{ .Values.kubecostProductConfigs.spotLabel }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.spotLabelValue }} + spotLabelValue: "{{ .Values.kubecostProductConfigs.spotLabelValue }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.awsSpotDataRegion }} + spotDataRegion: "{{ .Values.kubecostProductConfigs.awsSpotDataRegion }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.awsSpotDataBucket }} + spotDataBucket: "{{ .Values.kubecostProductConfigs.awsSpotDataBucket }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.awsSpotDataPrefix }} + spotDataPrefix: "{{ .Values.kubecostProductConfigs.awsSpotDataPrefix }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.projectID }} + projectID: "{{ .Values.kubecostProductConfigs.projectID }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.bigQueryBillingDataDataset }} + billingDataDataset: "{{ .Values.kubecostProductConfigs.bigQueryBillingDataDataset }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.athenaProjectID }} + athenaProjectID: "{{ .Values.kubecostProductConfigs.athenaProjectID }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.athenaBucketName }} + athenaBucketName: "{{ .Values.kubecostProductConfigs.athenaBucketName }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.athenaRegion }} + athenaRegion: "{{ .Values.kubecostProductConfigs.athenaRegion }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.athenaDatabase }} + athenaDatabase: "{{ .Values.kubecostProductConfigs.athenaDatabase }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.athenaTable }} + athenaTable: "{{ .Values.kubecostProductConfigs.athenaTable }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.masterPayerARN}} + masterPayerARN: "{{ .Values.kubecostProductConfigs.masterPayerARN }}" + {{- end }} + {{- if .Values.kubecostProductConfigs.gpuLabel }} + gpuLabel: "{{ .Values.kubecostProductConfigs.gpuLabel }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.gpuLabelValue }} + gpuLabelValue: "{{ .Values.kubecostProductConfigs.gpuLabelValue }}" + {{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-deployment.yaml 
b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-deployment.yaml new file mode 100644 index 000000000..c2b282772 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-deployment.yaml @@ -0,0 +1,53 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cost-analyzer.fullname" . }}-adapter + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + selector: + matchLabels: + app: adapter + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: adapter + spec: + initContainers: + - name: kubecost-sql-init + image: {{ .Values.remoteWrite.postgres.initImage }}:prod-{{ $.Chart.AppVersion }} + {{- if .Values.remoteWrite.postgres.initImagePullPolicy }} + imagePullPolicy: {{ .Values.remoteWrite.postgres.initImagePullPolicy }} + {{- else }} + imagePullPolicy: Always + {{- end }} + env: + - name: PROMETHEUS_SERVER_ENDPOINT + valueFrom: + configMapKeyRef: + name: {{ template "cost-analyzer.fullname" . }} + key: prometheus-server-endpoint + containers: + - image: timescale/prometheus-postgresql-adapter:latest + name: pgprometheusadapter + ports: + - containerPort: 9201 + args: + {{- if .Values.remoteWrite.postgres.installLocal }} + - "-pg-host=pgprometheus" + {{- else }} + - "-pg-host={{ .Values.remoteWrite.postgres.remotePostgresAddress }}" + {{- end }} + - "-pg-prometheus-log-samples=true" + - "-pg-password={{ .Values.remoteWrite.postgres.auth.password }}" +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-service.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-service.yaml new file mode 100644 index 000000000..d36479439 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheus-postgres-adapter-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.remoteWrite -}} +{{- if .Values.remoteWrite.postgres -}} +{{- if .Values.remoteWrite.postgres.enabled -}} +kind: Service +apiVersion: v1 +metadata: + name: pgprometheus-adapter + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +spec: + selector: + app: adapter + type: ClusterIP + ports: + - name: server + port: 9201 + targetPort: 9201 +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheusrule-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheusrule-template.yaml new file mode 100644 index 000000000..61380b424 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-prometheusrule-template.yaml @@ -0,0 +1,21 @@ +{{- if .Values.prometheus }} +{{- if .Values.prometheus.serverFiles }} +{{- if .Values.prometheus.serverFiles.rules }} +{{- if .Values.prometheusRule }} +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "cost-analyzer.fullname" . }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} + {{- if .Values.prometheusRule.additionalLabels }} + {{ toYaml .Values.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: + {{ toYaml .Values.prometheus.serverFiles.rules | nindent 2 }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-role.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-role.template.yaml new file mode 100644 index 000000000..f326b1f1c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-role.template.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podSecurityPolicy }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: kubecost-cost-analyzer-psp + annotations: +{{- if .Values.podSecurityPolicy.annotations }} +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - kubecost-cost-analyzer-psp +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-rolebinding.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-rolebinding.template.yaml new file mode 100644 index 000000000..8e1d925c5 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp-rolebinding.template.yaml @@ -0,0 +1,16 @@ +{{- if .Values.podSecurityPolicy }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubecost-cost-analyzer-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubecost-cost-analyzer-psp +subjects: +- kind: ServiceAccount + name: {{ template "cost-analyzer.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp.template.yaml new file mode 100644 index 000000000..885ab9ca9 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-psp.template.yaml @@ -0,0 +1,20 @@ +{{- if .Values.podSecurityPolicy }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ include "cost-analyzer.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: kubecost-cost-analyzer-psp +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + volumes: + - '*' +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pvc-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pvc-template.yaml new file mode 100644 index 000000000..d3e257668 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-pvc-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.persistentVolume -}} +{{- if not .Values.persistentVolume.existingClaim -}} +{{- if .Values.persistentVolume.enabled -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ template "cost-analyzer.fullname" . }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + {{- if .Values.persistentVolume.storageClass }} + storageClassName: {{ .Values.persistentVolume.storageClass }} + {{ end }} + resources: + requests: + {{- if .Values.persistentVolume }} + storage: {{ .Values.persistentVolume.size }} + {{- else }} + storage: 0.2Gi + {{ end }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-saml-config-map-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-saml-config-map-template.yaml new file mode 100644 index 000000000..71ac8659c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-saml-config-map-template.yaml @@ -0,0 +1,13 @@ +{{- if .Values.saml }} +{{- if .Values.saml.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "cost-analyzer.fullname" . }}-saml + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: +{{- $root := . }} + saml.json: '{{ toJson .Values.saml }}' +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-server-configmap.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-server-configmap.yaml new file mode 100644 index 000000000..edf5ca6fa --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-server-configmap.yaml @@ -0,0 +1,69 @@ +{{- if .Values.kubecostProductConfigs }} +{{- if or .Values.kubecostProductConfigs.grafanaURL .Values.kubecostProductConfigs.labelMappingConfigs }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: app-configs + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +data: +{{- if .Values.kubecostProductConfigs.labelMappingConfigs }} +{{- if .Values.kubecostProductConfigs.labelMappingConfigs.enabled }} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.owner_label }} + owner_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.owner_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.team_label }} + team_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.team_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.department_label }} + department_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.department_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.product_label }} + product_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.product_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.environment_label }} + environment_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.environment_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.namespace_external_label }} + namespace_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.namespace_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.cluster_external_label }} + cluster_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.cluster_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.controller_external_label }} + controller_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.controller_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.product_external_label }} + product_external_label: "{{ 
.Values.kubecostProductConfigs.labelMappingConfigs.product_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.service_external_label }} + service_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.service_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.deployment_external_label }} + deployment_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.deployment_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.team_external_label }} + team_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.team_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.environment_external_label }} + environment_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.environment_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.department_external_label }} + department_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.department_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.statefulset_external_label }} + statefulset_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.statefulset_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.daemonset_external_label }} + daemonset_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.daemonset_external_label }}" + {{- end -}} + {{- if .Values.kubecostProductConfigs.labelMappingConfigs.pod_external_label }} + pod_external_label: "{{ .Values.kubecostProductConfigs.labelMappingConfigs.pod_external_label }}" + {{- end -}} +{{- end -}} +{{- end -}} + {{- if .Values.kubecostProductConfigs.grafanaURL }} + grafanaURL: "{{ .Values.kubecostProductConfigs.grafanaURL }}" + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-account-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-account-template.yaml new file mode 100644 index 000000000..d5e3be3b9 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-account-template.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cost-analyzer.serviceAccountName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-template.yaml new file mode 100644 index 000000000..dd48bb3f8 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-service-template.yaml @@ -0,0 +1,69 @@ +{{- $nginxPort := .Values.service.targetPort | default 9090 -}} +{{- $servicePort := .Values.service.port | default 9090 -}} +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cost-analyzer.serviceName" . }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + selector: + {{ include "cost-analyzer.selectorLabels" . | nindent 4 }} +{{- if .Values.service -}} +{{- if .Values.service.type }} + type: "{{ .Values.service.type }}" +{{- else }} + type: ClusterIP +{{- end }} +{{- else }} + type: ClusterIP +{{- end }} + ports: + - name: server + port: 9001 + targetPort: 9001 + - name: model + port: 9003 + targetPort: 9003 + - name: frontend + {{- if .Values.kubecostFrontend.tls }} + {{- if .Values.kubecostFrontend.tls.enabled }} + port: 443 + targetPort: 443 + {{- if (eq .Values.service.type "NodePort") }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- else }} + port: {{ $servicePort }} + targetPort: {{ $nginxPort }} + {{- if (eq .Values.service.type "NodePort") }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- end}} + {{- else }} + port: {{ $servicePort }} + targetPort: {{ $nginxPort }} + {{- if (eq .Values.service.type "NodePort") }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.saml }} + {{- if .Values.saml.enabled }} + - name: apiserver + port: 9004 + targetPort: 9004 + {{- end }} + {{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-servicemonitor-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-servicemonitor-template.yaml new file mode 100644 index 000000000..6f79d8cd4 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/cost-analyzer-servicemonitor-template.yaml @@ -0,0 +1,27 @@ +{{- if .Values.serviceMonitor }} +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cost-analyzer.fullname" . }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if .Values.serviceMonitor.additionalLabels }} + {{ toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: model + honorLabels: true + interval: 1m + scrapeTimeout: 10s + path: /metrics + scheme: http + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{ include "cost-analyzer.selectorLabels" . | nindent 6 }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/external-grafana-config-map-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/external-grafana-config-map-template.yaml new file mode 100644 index 000000000..5e81ed359 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/external-grafana-config-map-template.yaml @@ -0,0 +1,10 @@ +{{- if eq .Values.global.grafana.proxy false -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: external-grafana-config-map + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} +data: + grafanaURL: {{ .Values.global.grafana.scheme | default "http" }}://{{- .Values.global.grafana.domainName }} +{{- end -}} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-attached-disk-metrics-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-attached-disk-metrics-template.yaml new file mode 100644 index 000000000..6d896a34c --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-attached-disk-metrics-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: attached-disk-metrics-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + attached-disks.json: |- +{{ .Files.Get "attached-disks.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-metrics-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-metrics-template.yaml new file mode 100644 index 000000000..91650c74b --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-metrics-template.yaml @@ -0,0 +1,27 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-metrics-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + cluster-metrics.json: |- +{{ .Files.Get "cluster-metrics.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + + diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-utilization-template.yaml new file mode 100644 index 000000000..8fe764c3e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-cluster-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-utilization-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + cluster-utilization.json: |- +{{ .Files.Get "cluster-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-deployment-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-deployment-utilization-template.yaml new file mode 100644 index 000000000..d16c70bcd --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-deployment-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: deployment-utilization-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + deployment-utilization.json: |- +{{ .Files.Get "deployment-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-label-cost-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-label-cost-utilization-template.yaml new file mode 100644 index 000000000..2372514d4 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-label-cost-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: label-cost-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + label-cost-utilization.json: |- +{{ .Files.Get "label-cost-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-namespace-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-namespace-utilization-template.yaml new file mode 100644 index 000000000..17393dd16 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-namespace-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: namespace-utilization-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + namespace-utilization.json: |- +{{ .Files.Get "namespace-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-node-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-node-utilization-template.yaml new file mode 100644 index 000000000..586a3dfca --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-node-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-utilization-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + node-utilization.json: |- +{{ .Files.Get "node-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-pod-utilization-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-pod-utilization-template.yaml new file mode 100644 index 000000000..92017afb8 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-pod-utilization-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: pod-utilization-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + pod-utilization.json: |- +{{ .Files.Get "pod-utilization.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-prometheus-metrics-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-prometheus-metrics-template.yaml new file mode 100644 index 000000000..32e1fc707 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-dashboard-prometheus-metrics-template.yaml @@ -0,0 +1,25 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.dashboards -}} +{{- if .Values.grafana.sidecar.dashboards.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: prom-benchmark-dashboard + {{- if $.Values.grafana.namespace_dashboards }} + namespace: {{ $.Values.grafana.namespace_dashboards }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . 
| nindent 4 }} + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- else }} + grafana_dashboard: "1" + {{- end }} +data: + pod-utilization.json: |- +{{ .Files.Get "prom-benchmark.json" | indent 8 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-datasource-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-datasource-template.yaml new file mode 100644 index 000000000..e79d2fa35 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/grafana-datasource-template.yaml @@ -0,0 +1,72 @@ +{{- if .Values.grafana -}} +{{- if .Values.grafana.sidecar -}} +{{- if .Values.grafana.sidecar.datasources -}} +{{- if .Values.grafana.sidecar.datasources.enabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-datasource + {{- if $.Values.grafana.namespace_datasources }} + namespace: {{ $.Values.grafana.namespace_datasources }} + {{- end }} + labels: + {{ include "cost-analyzer.commonLabels" . | nindent 4 }} + {{- if $.Values.grafana.sidecar.datasources.label }} + {{ $.Values.grafana.sidecar.datasources.label }}: "1" + {{- else }} + {{- if .Values.global.grafana.enabled }} + kubecost_grafana_datasource: "1" + {{- else }} + grafana_datasource: "1" + {{- end }} + {{- end }} +data: + {{ default "datasource.yaml" .Values.grafana.sidecar.datasources.dataSourceFilename }}: |- + apiVersion: 1 + datasources: + - access: proxy +{{- if .Values.global.thanos }} +{{- if .Values.global.thanos.enabled }} + name: {{ default "Prometheus" .Values.grafana.sidecar.datasources.dataSourceName }} + isDefault: false +{{- else }} + name: "default-kubecost" +{{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} + isDefault: true +{{- else }} + isDefault: false +{{- end }} +{{- end }} +{{- else }} + name: "default-kubecost" +{{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} + isDefault: true +{{- else }} + isDefault: false +{{- end }} +{{- end }} + type: prometheus +{{- if .Values.global.prometheus.enabled }} + url: http://{{ template "cost-analyzer.prometheus.server.name" . }}.{{ .Release.Namespace }} +{{- else }} + url: {{ .Values.global.prometheus.fqdn }} +{{- end }} +{{- if .Values.global.thanos.enabled }} + - access: proxy + name: "default-kubecost" + type: prometheus +{{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} + isDefault: true +{{- else }} + isDefault: false +{{- end }} +{{- if .Values.global.prometheus.enabled }} + url: http://{{ .Release.Name }}-thanos-query-http.{{ .Release.Namespace }}:{{ .Values.thanos.query.http.port }} +{{ else }} + url: {{ .Values.global.thanos.queryService }} +{{- end }} +{{- end }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-controller-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-controller-template.yaml new file mode 100644 index 000000000..1a947e696 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-controller-template.yaml @@ -0,0 +1,282 @@ +{{- if .Values.clusterController }} +{{- if .Values.clusterController.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubecost.clusterControllerName" . }} + labels: + app: {{ template "kubecost.clusterControllerName" . 
}} +--- +# +# NOTE: +# The following ClusterRole permissions are only created and assigned for the +# cluster controller feature. They will not be added to any clusters by default. +# +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kubecost.clusterControllerName" . }} + labels: + app: {{ template "kubecost.clusterControllerName" . }} +rules: + - apiGroups: + - kubecost.k8s.io + resources: + - turndownschedules + - turndownschedules/status + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - '' + resources: + - deployments + - nodes + - pods + - resourcequotas + - replicationcontrollers + - limitranges + - pods/eviction + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - '' + resources: + - configmaps + - namespaces + - persistentvolumeclaims + - persistentvolumes + - endpoints + - events + - services + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - apps + resources: + - statefulsets + - deployments + - daemonsets + - replicasets + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - events.k8s.io + resources: + - events + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kubecost.clusterControllerName" . }} + labels: + app: {{ template "kubecost.clusterControllerName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kubecost.clusterControllerName" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kubecost.clusterControllerName" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kubecost.clusterControllerName" . }} +spec: + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + selector: + matchLabels: + app: {{ template "kubecost.clusterControllerName" . }} + template: + metadata: + labels: + app: {{ template "kubecost.clusterControllerName" . }} + spec: + containers: + - name: {{ template "kubecost.clusterControllerName" . }} + image: {{ .Values.clusterController.image }} + imagePullPolicy: {{ .Values.clusterController.imagePullPolicy }} + volumeMounts: + - name: cluster-controller-keys + mountPath: /var/keys + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: TURNDOWN_NAMESPACE + value: {{ .Release.Namespace }} + - name: TURNDOWN_DEPLOYMENT + value: {{ template "kubecost.clusterControllerName" . 
}} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/keys/service-key.json + ports: + - name: http-server + containerPort: 9731 + hostPort: 9731 + serviceAccount: {{ template "kubecost.clusterControllerName" . }} + serviceAccountName: {{ template "kubecost.clusterControllerName" . }} + volumes: + - name: cluster-controller-keys + secret: + secretName: {{ .Values.clusterController.secretName | default "cluster-controller-service-key" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kubecost.clusterControllerName" . }}-service +spec: + type: ClusterIP + ports: + - name: http + protocol: TCP + port: 9731 + targetPort: 9731 + selector: + app: {{ template "kubecost.clusterControllerName" . }} +--- +# TurndownSchedule Custom Resource Definition for persistence +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: turndownschedules.kubecost.k8s.io +spec: + group: kubecost.k8s.io + version: v1alpha1 + names: + kind: TurndownSchedule + singular: turndownschedule + plural: turndownschedules + shortNames: + - td + - tds + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + type: object + properties: + start: + type: string + format: date-time + end: + type: string + format: date-time + repeat: + type: string + enum: [none, daily, weekly] + additionalPrinterColumns: + - name: State + type: string + description: The state of the turndownschedule + JSONPath: .status.state + - name: Next Turndown + type: string + description: The next turndown date-time + JSONPath: .status.nextScaleDownTime + - name: Next Turn Up + type: string + description: The next turn up date-time + JSONPath: .status.nextScaleUpTime +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-manager-configmap-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-manager-configmap-template.yaml new file mode 100644 index 000000000..907252f93 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-cluster-manager-configmap-template.yaml @@ -0,0 +1,13 @@ +{{- if .Values.kubecostProductConfigs }} +{{- if .Values.kubecostProductConfigs.clusters }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubecost-clusters + labels: + {{- include "cost-analyzer.commonLabels" . | nindent 4 }} +data: + default-clusters.yaml: | +{{- toYaml .Values.kubecostProductConfigs.clusters | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-priority-class-template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-priority-class-template.yaml new file mode 100644 index 000000000..a098a766e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/kubecost-priority-class-template.yaml @@ -0,0 +1,11 @@ +{{- if .Values.priority }} +{{- if .Values.priority.enabled }} +apiVersion: {{ include "cost-analyzer.priorityClass.apiVersion" . }} +kind: PriorityClass +metadata: + name: {{ template "cost-analyzer.fullname" . 
}}-priority +value: {{ .Values.priority.value | default "1000000" }} +globalDefault: false +description: "Priority class for scheduling the cost-analyzer pod" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-psp.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-psp.template.yaml new file mode 100644 index 000000000..c6d1c5385 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-psp.template.yaml @@ -0,0 +1,24 @@ +{{- if .Values.networkCosts }} +{{- if .Values.networkCosts.enabled }} +{{- if .Values.networkCosts.podSecurityPolicy }} +{{- if .Values.networkCosts.podSecurityPolicy.enabled }} +apiVersion: {{ include "cost-analyzer.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: kubecost-network-costs +spec: + privileged: true + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + fsGroup: + rule: RunAsAny + volumes: + - '*' +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-role.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-role.template.yaml new file mode 100644 index 000000000..04424a44e --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-role.template.yaml @@ -0,0 +1,22 @@ +{{- if .Values.networkCosts }} +{{- if .Values.networkCosts.enabled }} +{{- if .Values.networkCosts.podSecurityPolicy }} +{{- if .Values.networkCosts.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: kubecost-network-costs + annotations: +{{- if .Values.networkCosts.podSecurityPolicy.annotations }} +{{ toYaml .Values.networkCosts.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - kubecost-network-costs +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-rolebinding.template.yaml b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-rolebinding.template.yaml new file mode 100644 index 000000000..84ab0d1c2 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/templates/network-costs-rolebinding.template.yaml @@ -0,0 +1,20 @@ +{{- if .Values.networkCosts }} +{{- if .Values.networkCosts.enabled }} +{{- if .Values.networkCosts.podSecurityPolicy }} +{{- if .Values.networkCosts.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubecost-network-costs +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubecost-network-costs +subjects: +- kind: ServiceAccount + name: {{ template "cost-analyzer.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubecost/cost-analyzer/1.70.000/values-thanos.yaml b/charts/kubecost/cost-analyzer/1.70.000/values-thanos.yaml new file mode 100644 index 000000000..b0bc33889 --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/values-thanos.yaml @@ -0,0 +1,117 @@ +global: + thanos: + enabled: true + +# For Thanos Installs, Allow Higher Concurrency from Cost-Model +# Still may require tweaking for some installs, but the thanos-query-frontend +# will greatly assist in reduction memory bloat in query. 
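This overlay also assumes an object-store secret (named `kubecost-thanos` below and referenced by `prometheus.server.extraVolumes` and `thanos.storeSecretName`) whose `object-store.yaml` key holds a Thanos object-storage configuration. A minimal sketch for an S3-compatible bucket is shown here; the bucket, endpoint, and credentials are placeholders, and the exact fields depend on the object store in use:

```yaml
# object-store.yaml -- illustrative Thanos objstore config for an S3-compatible store.
# All values are placeholders; after filling them in, create the secret referenced
# by this overlay, e.g.:
#   kubectl create secret generic kubecost-thanos --from-file=object-store.yaml
type: S3
config:
  bucket: "kubecost-thanos-metrics"       # assumed bucket name
  endpoint: "s3.us-east-1.amazonaws.com"  # regional S3 endpoint
  region: "us-east-1"
  access_key: "<ACCESS_KEY>"
  secret_key: "<SECRET_KEY>"
```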
+kubecostModel: + maxQueryConcurrency: 5 + # This configuration is applied to thanos only. Expresses the resolution to + # use for longer query ranges. Options: raw, 5m, 1h - Default: raw + maxSourceResolution: 5m + +prometheus: + server: + extraArgs: + storage.tsdb.min-block-duration: 2h + storage.tsdb.max-block-duration: 2h + storage.tsdb.retention: 2w + extraVolumes: + - name: object-store-volume + secret: + # Ensure this secret name matches thanos.storeSecretName + secretName: kubecost-thanos + enableAdminApi: true + sidecarContainers: + - name: thanos-sidecar + image: thanosio/thanos:v0.15.0 + args: + - sidecar + - --log.level=debug + - --tsdb.path=/data/ + - --prometheus.url=http://127.0.0.1:9090 + - --objstore.config-file=/etc/config/object-store.yaml + # Start of time range limit to serve. Thanos sidecar will serve only metrics, which happened + # later than this value. Option can be a constant time in RFC3339 format or time duration + # relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y. + - --min-time=-3h + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + ports: + - name: sidecar-http + containerPort: 10902 + - name: grpc + containerPort: 10901 + - name: cluster + containerPort: 10900 + volumeMounts: + - name: config-volume + mountPath: /etc/prometheus + - name: storage-volume + mountPath: /data + subPath: "" + - name: object-store-volume + mountPath: /etc/config + +thanos: + store: + enabled: true + grpcSeriesMaxConcurrency: 20 + blockSyncConcurrency: 20 + extraEnv: + - name: GOGC + value: "100" + resources: + requests: + memory: "2.5Gi" + query: + enabled: true + timeout: 3m + # Maximum number of queries processed concurrently by query node. + maxConcurrent: 8 + # Maximum number of select requests made concurrently per a query. + maxConcurrentSelect: 2 + resources: + requests: + memory: "2.5Gi" + autoDownsampling: false + extraEnv: + - name: GOGC + value: "100" + + # Thanos Query Frontend + queryFrontend: + enabled: true + compressResponses: true + # Response Cache Configuration + # Configure either a max size constraint or max items. + responseCache: + enabled: true + # Maximum memory size of the cache in bytes. A unit suffix (KB, MB, GB) may be applied. + maxSize: 1.25GB + # Maximum number of entries in the cache. + maxSizeItems: 0 + # The expiry duration for the cache. + validity: 2m + resources: + requests: + memory: "1.5Gi" + + # Thanos Sidecar Service Discovery + sidecar: + enabled: true + bucket: + enabled: false + compact: + enabled: true + dataVolume: + persistentVolumeClaim: + claimName: compact-data-volume + storage: 100Gi + # This secret name should match the sidecar configured secret name volume + # in the prometheus.server.extraVolumes entry + storeSecretName: kubecost-thanos diff --git a/charts/kubecost/cost-analyzer/1.70.000/values.yaml b/charts/kubecost/cost-analyzer/1.70.000/values.yaml new file mode 100644 index 000000000..29e3d558f --- /dev/null +++ b/charts/kubecost/cost-analyzer/1.70.000/values.yaml @@ -0,0 +1,561 @@ +global: + # zone: cluster.local (use only if your DNS server doesn't live in the same zone as kubecost) + prometheus: + enabled: true # If false, Prometheus will not be installed -- only actively supported on paid Kubecost plans + fqdn: http://cost-analyzer-prometheus-server.default.svc #example fqdn. 
Ignored if enabled: true + # insecureSkipVerify : false # If true, kubecost will not check the TLS cert of prometheus + # queryServiceBasicAuthSecretName: dbsecret # kubectl create secret generic dbsecret -n kubecost --from-file=USERNAME --from-file=PASSWORD + # queryServiceBearerTokenSecretName: dbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=TOKEN + + # Durable storage option, product key required + thanos: + enabled: false + # queryService: http://thanos-query-frontend-http.kubecost:{{ .Values.thanos.queryFrontend.http.port }} # an address of the thanos query-frontend endpoint, if different from installed thanos + # queryServiceBasicAuthSecretName: mcdbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=USERNAME --from-file=PASSWORD <---enter basic auth credentials like that + # queryServiceBearerTokenSecretName mcdbsecret # kubectl create secret generic mcdbsecret -n kubecost --from-file=TOKEN + # queryOffset: 3h # The offset to apply to all thanos queries in order to achieve syncronization on all cluster block stores + + grafana: + enabled: true # If false, Grafana will not be installed + domainName: cost-analyzer-grafana.default.svc #example grafana domain Ignored if enabled: true + scheme: "http" # http or https, for the domain name above. + proxy: true # If true, the kubecost frontend will route to your grafana through its service endpoint + + notifications: + # Kubecost alerting configuration + # Ref: http://docs.kubecost.com/alerts + alertConfigs: + enabled: false # the example values below are never read unless enabled is set to true + frontendUrl: http://localhost:9090 # optional, used for linkbacks + slackWebhookUrl: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX # optional, used for Slack alerts + globalAlertEmails: + - recipient@example.com + - additionalRecipient@example.com + alerts: + # Daily namespace budget alert on namespace `kubecost` + - type: budget # supported: budget, recurringUpdate + threshold: 50 # optional, required for budget alerts + window: daily # or 1d + aggregation: namespace + filter: kubecost + ownerContact: # optional, overrides globalAlertEmails default + - owner@example.com + - owner2@example.com + # Daily cluster budget alert (clusterCosts alert) on cluster `cluster-one` + - type: budget + threshold: 200.8 # optional, required for budget alerts + window: daily # or 1d + aggregation: cluster + filter: cluster-one + # Recurring weekly update (weeklyUpdate alert) + - type: recurringUpdate + window: weekly # or 7d + aggregation: namespace + filter: '*' + # Recurring weekly namespace update on kubecost namespace + - type: recurringUpdate + window: weekly # or 7d + aggregation: namespace + filter: kubecost + ownerContact: # ownerContact(s) should be the same for the same namespace, otherwise the last namespace alert overwrites + - owner@example.com + - owner2@example.com + alertmanager: # Supply an alertmanager FQDN to receive notifications from the app. + enabled: false # If true, allow kubecost to write to your alertmanager + fqdn: http://cost-analyzer-prometheus-server.default.svc #example fqdn. 
Ignored if prometheus.enabled: true + + podAnnotations: {} + # iam.amazonaws.com/role: role-arn + +pricingCsv: + enabled: false + location: + provider: "AWS" + region: "us-east-1" + URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI + csvAccessCredentials: pricing-schema-access-secret + +saml: # enterprise key required to use + enabled: false + secretName: "kubecost-authzero" + #metadataSecretName: "kubecost-authzero-metadata" # One of metadataSecretName or idpMetadataURL must be set. defaults to metadataURL if set + idpMetadataURL: "https://dev-elu2z98r.auth0.com/samlp/metadata/c6nY4M37rBP0qSO1IYIqBPPyIPxLS8v2" + appRootURL: "http://localhost:9090" # sample URL + # audienceURI: "http://localhost:9090" # by convention, the same as the appRootURL, but any string uniquely identifying kubecost to your samp IDP. Optional if you follow the convention + rbac: + enabled: false + groups: + - name: admin + enabled: false # if admin is disabled, all SAML users will be able to make configuration changes to the kubecost frontend + assertionName: "http://schemas.auth0.com/userType" # a SAML Assertion, one of whose elements has a value that matches on of the values in assertionValues + assertionValues: + - "admin" + - "superusers" + - name: readonly + enabled: false # if readonly is disabled, all users authorized on SAML will default to readonly + assertionName: "http://schemas.auth0.com/userType" + assertionvalues: + - "readonly" + +# imagePullSecrets: +# - name: "image-pull-secret" + +kubecostChecks: + enabled: true + image: "quay.io/kubecost1/checks" + resources: + requests: + cpu: "20m" + memory: "100Mi" + limits: + cpu: "100m" + memory: "200Mi" + +kubecostFrontend: + image: "gcr.io/kubecost1/frontend" + imagePullPolicy: Always + resources: + requests: + cpu: "10m" + memory: "55Mi" + #limits: + # cpu: "100m" + # memory: "256Mi" + +kubecost: + image: "gcr.io/kubecost1/server" + resources: + requests: + cpu: "100m" + memory: "55Mi" + #limits: + # cpu: "100m" + # memory: "256Mi" + +kubecostModel: + image: "gcr.io/kubecost1/cost-model" + imagePullPolicy: Always + warmCache: true + warmSavingsCache: true + etl: true + # The total number of days the ETL storage will build + etlStoreDurationDays: 120 + maxQueryConcurrency: 5 + # utcOffset represents a timezone in hours and minutes east (+) or west (-) + # of UTC, itself, which is defined as +00:00. + # See the tz database of timezones to look up your local UTC offset: + # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + utcOffset: "+00:00" + resources: + requests: + cpu: "200m" + memory: "55Mi" + #limits: + # cpu: "800m" + # memory: "256Mi" + +ingress: + enabled: false + annotations: + kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + paths: ["/"] # There's no need to route specifically to the pods-- we have an nginx deployed that handles routing + hosts: + - cost-analyzer.local + tls: [] + # - secretName: cost-analyzer-tls + # hosts: + # - cost-analyzer.local + +nodeSelector: {} + +tolerations: [] +# - key: "key" +# operator: "Equal|Exists" +# value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +affinity: {} + +# If true, creates a PriorityClass to be used by the cost-analyzer pod +priority: + enabled: false + # value: 1000000 + +# If true, enable creation of NetworkPolicy resources. 
+networkPolicy: + enabled: false + +podSecurityPolicy: + enabled: false + +# Define persistence volume for cost-analyzer +persistentVolume: + size: 0.2Gi + dbSize: 32.0Gi + enabled: true # Note that setting this to false means configurations will be wiped out on pod restart. + # storageClass: "-" # + # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost + +service: + type: ClusterIP + port: 9090 + targetPort: 9090 + # nodePort: + labels: {} + annotations: {} + +# enabling long-term durable storage with Postgres requires an enterprise license +remoteWrite: + postgres: + enabled: false + initImage: "gcr.io/kubecost1/sql-init" + initImagePullPolicy: Always + installLocal: true + remotePostgresAddress: "" # ignored if installing locally + persistentVolume: + size: 200Gi + auth: + password: admin # change me + +prometheus: + extraScrapeConfigs: | + - job_name: kubecost + honor_labels: true + scrape_interval: 1m + scrape_timeout: 10s + metrics_path: /metrics + scheme: http + dns_sd_configs: + - names: + - {{ template "cost-analyzer.serviceName" . }} + type: 'A' + port: 9003 + - job_name: kubecost-networking + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_pod_label_app] + action: keep + regex: {{ template "cost-analyzer.networkCostsName" . }} + server: + # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID + # to use as unique cluster ID in kubecost cost-analyzer deployment. + # This overrides the cluster_id set in prometheus.server.global.external_labels. + # NOTE: This does not affect the external_labels set in prometheus config. + # clusterIDConfigmap: cluster-id-configmap + + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + global: + scrape_interval: 1m + scrape_timeout: 10s + evaluation_interval: 1m + external_labels: + cluster_id: cluster-one # Each cluster should have a unique ID + persistentVolume: + size: 32Gi + enabled: true + extraArgs: + query.max-concurrency: 1 + query.max-samples: 100000000 + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + alertmanager: + # enabled: false + persistentVolume: + enabled: true + nodeExporter: + enabled: true + pushgateway: + enabled: false + persistentVolume: + enabled: true + serverFiles: + # prometheus.yml: # Sample block -- enable if using an in cluster durable store. 
+ # remote_write: + # - url: "http://pgprometheus-adapter:9201/write" + # write_relabel_configs: + # - source_labels: [__name__] + # regex: 'container_.*_allocation|container_.*_allocation_bytes|.*_hourly_cost|kube_pod_container_resource_requests_memory_bytes|container_memory_working_set_bytes|kube_pod_container_resource_requests_cpu_cores|kube_pod_container_resource_requests|pod_pvc_allocation|kube_namespace_labels|kube_pod_labels' + # action: keep + # queue_config: + # max_samples_per_send: 1000 + #remote_read: + # - url: "http://pgprometheus-adapter:9201/read" + rules: + groups: + - name: CPU + rules: + - expr: sum(rate(container_cpu_usage_seconds_total{container_name!=""}[5m])) + record: cluster:cpu_usage:rate5m + - expr: rate(container_cpu_usage_seconds_total{container_name!=""}[5m]) + record: cluster:cpu_usage_nosum:rate5m + - expr: avg(irate(container_cpu_usage_seconds_total{container_name!="POD", container_name!=""}[5m])) by (container_name,pod_name,namespace) + record: kubecost_container_cpu_usage_irate + - expr: sum(container_memory_working_set_bytes{container_name!="POD",container_name!=""}) by (container_name,pod_name,namespace) + record: kubecost_container_memory_working_set_bytes + - expr: sum(container_memory_working_set_bytes{container_name!="POD",container_name!=""}) + record: kubecost_cluster_memory_working_set_bytes + - name: Savings + rules: + - expr: sum(avg(kube_pod_owner{owner_kind!="DaemonSet"}) by (pod) * sum(container_cpu_allocation) by (pod)) + record: kubecost_savings_cpu_allocation + labels: + daemonset: "false" + - expr: sum(avg(kube_pod_owner{owner_kind="DaemonSet"}) by (pod) * sum(container_cpu_allocation) by (pod)) / sum(kube_node_info) + record: kubecost_savings_cpu_allocation + labels: + daemonset: "true" + - expr: sum(avg(kube_pod_owner{owner_kind!="DaemonSet"}) by (pod) * sum(container_memory_allocation_bytes) by (pod)) + record: kubecost_savings_memory_allocation_bytes + labels: + daemonset: "false" + - expr: sum(avg(kube_pod_owner{owner_kind="DaemonSet"}) by (pod) * sum(container_memory_allocation_bytes) by (pod)) / sum(kube_node_info) + record: kubecost_savings_memory_allocation_bytes + labels: + daemonset: "true" + - expr: label_replace(sum(kube_pod_status_phase{phase="Running",namespace!="kube-system"} > 0) by (pod, namespace), "pod_name", "$1", "pod", "(.+)") + record: kubecost_savings_running_pods + - expr: sum(rate(container_cpu_usage_seconds_total{container_name!="",container_name!="POD",instance!=""}[5m])) by (namespace, pod_name, container_name, instance) + record: kubecost_savings_container_cpu_usage_seconds + - expr: sum(container_memory_working_set_bytes{container_name!="",container_name!="POD",instance!=""}) by (namespace, pod_name, container_name, instance) + record: kubecost_savings_container_memory_usage_bytes + - expr: avg(sum(kube_pod_container_resource_requests_cpu_cores{namespace!="kube-system"}) by (pod, namespace, instance)) by (pod, namespace) + record: kubecost_savings_pod_requests_cpu_cores + - expr: avg(sum(kube_pod_container_resource_requests_memory_bytes{namespace!="kube-system"}) by (pod, namespace, instance)) by (pod, namespace) + record: kubecost_savings_pod_requests_memory_bytes + +networkCosts: + enabled: false + podSecurityPolicy: + enabled: false + image: gcr.io/kubecost1/kubecost-network-costs:v13.7 + imagePullPolicy: Always + # Traffic Logging will enable logging the top 5 destinations for each source + # every 30 minutes. + trafficLogging: true + # Port will set both the containerPort and hostPort to this value. 
+ # These must be identical due to network-costs being run on hostNetwork + port: 3001 + resources: {} + #requests: + # cpu: "50m" + # memory: "20Mi" + config: + # Configuration for traffic destinations, including specific classification + # for IPs and CIDR blocks. This configuration will act as an override to the + # automatic classification provided by network-costs. + destinations: + # In Zone contains a list of address/range that will be + # classified as in zone. + in-zone: + # Loopback + - "127.0.0.1" + # IPv4 Link Local Address Space + - "169.254.0.0/16" + # Private Address Ranges in RFC-1918 + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + + # In Region contains a list of address/range that will be + # classified as in region. This is synonymous with cross + # zone traffic, where the regions between source and destinations + # are the same, but the zone is different. + in-region: [] + + # Cross Region contains a list of address/range that will be + # classified as non-internet egress from one region to another. + cross-region: [] + + # Direct Classification specifically maps an ip address or range + # to a region (required) and/or zone (optional). This classification + # takes priority over in-zone, in-region, and cross-region configurations. + direct-classification: [] + # - region: "us-east1" + # zone: "us-east1-c" + # ips: + # - "10.0.0.0/24" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## PriorityClassName + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + priorityClassName: [] + ## PodMonitor + ## Allows scraping of network metrics from a dedicated prometheus operator setup + podMonitor: + enabled: false + additionalLabels: {} + +# Kubecost Deployment Configuration +# Used for HA mode in Business & Enterprise tier +kubecostDeployment: + replicas: 1 + +# Kubecost Cluster Controller for Right Sizing and Cluster Turndown +clusterController: + enabled: false + image: gcr.io/kubecost1/cluster-controller:v0.0.2 + imagePullPolicy: Always + +reporting: + # Kubecost bug report feature: Logs access/collection limited to .Release.Namespace + # Ref: http://docs.kubecost.com/bug-report + logCollection: true + productAnalytics: true + errorReporting: true + valuesReporting: true + +serviceMonitor: + enabled: false + additionalLabels: {} + +prometheusRule: + enabled: false + additionalLabels: {} + +supportNFS: true +initChownDataImage: "busybox" # Supports a fully qualified Docker image eg: registry.hub.docker.com/library/busybox:latest. +initChownData: + resources: {} + #requests: + # cpu: "50m" + # memory: "20Mi" + + +grafana: + # namespace_datasources: kubecost # override the default namespace here + # namespace_dashboards: kubecost # override the default namespace here + sidecar: + dashboards: + enabled: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + datasources: + # dataSourceFilename: foo.yml # If you need to change the name of the datasource file + enabled: true + defaultDatasourceEnabled: false + dataSourceName: default-kubecost + # label that the configmaps with datasources are marked with + label: kubecost_grafana_datasource +# For grafana to be accessible, add the path to root_url. 
For example, if you run kubecost at www.foo.com:9090/kubecost +# set root_url to "%(protocol)s://%(domain)s:%(http_port)s/kubecost/grafana". No change is necessary here if kubecost runs at a root URL + grafana.ini: + server: + root_url: "%(protocol)s://%(domain)s:%(http_port)s/grafana" +serviceAccount: + create: true # Set this to false if you're bringing your own service account. + annotations: {} + # name: kc-test + +# These configs can also be set from the Settings page in the Kubecost product UI +# Values in this block override config changes in the Settings UI on pod restart +# +# kubecostProductConfigs: +# An optional list of cluster definitions that can be added for frontend access. The local +# cluster is *always* included by default, so this list is for non-local clusters. +# Ref: https://github.com/kubecost/docs/blob/master/multi-cluster.md +# clusters: +# - name: "Cluster A" +# address: http://cluster-a.kubecost.com:9090 +# # Optional authentication credentials - only basic auth is currently supported. +# auth: +# type: basic +# # Secret name should be a secret formatted based on: https://github.com/kubecost/docs/blob/master/ingress-examples.md +# secretName: cluster-a-auth +# # Or pass auth directly as base64 encoded user:pass +# data: YWRtaW46YWRtaW4= +# # Or user and pass directly +# user: admin +# pass: admin +# - name: "Cluster B" +# address: http://cluster-b.kubecost.com:9090 +# defaultModelPricing: # default monthly resource prices, used predominately for on-prem clusters +# CPU: 28.0 +# spotCPU: 4.86 +# RAM: 3.09 +# spotRAM: 0.65 +# GPU: 693.50 +# spotGPU: 225.0 +# storage: 0.04 +# zoneNetworkEgress: 0.01 +# regionNetworkEgress: 0.01 +# internetNetworkEgress: 0.12 +# enabled: true +# # The cluster profile represents a predefined set of parameters to use when calculating savings. +# # Possible values are: [ development, production, high-availability ] +# clusterProfile: production +# customPricesEnabled: false # This makes the default view custom prices-- generally used for on-premises clusters +# spotLabel: lifecycle +# spotLabelValue: Ec2Spot +# gpuLabel: gpu +# gpuLabelValue: true +# awsServiceKeyName: ACCESSKEYID +# awsServiceKeyPassword: fakepassword # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName +# awsSpotDataRegion: us-east-1 +# awsSpotDataBucket: spot-data-feed-s3-bucket +# awsSpotDataPrefix: dev +# athenaProjectID: "530337586277" # The AWS AccountID where the Athena CUR is. Generally your masterpayer account +# athenaBucketName: "s3://aws-athena-query-results-530337586277-us-east-1" +# athenaRegion: us-east-1 +# athenaDatabase: athenacurcfn_athena_test1 +# athenaTable: "athena_test1" +# masterPayerARN: "" +# projectID: "123456789" # Also known as AccountID on AWS -- the current account/project that this instance of Kubecost is deployed on. 
+# gcpSecretName: gcp-secret # Name of a secret representing the gcp service key +# bigQueryBillingDataDataset: billing_data.gcp_billing_export_v1_01AC9F_74CF1D_5565A2 +# labelMappingConfigs: # names of k8s labels used to designate different allocation concepts +# enabled: true +# owner_label: "owner" +# team_label: "team" +# department_label: "dept" +# product_label: "product" +# environment_label: "env" +# namespace_external_label: "kubernetes_namespace" # external labels are used to map external cloud costs to kubernetes concepts +# cluster_external_label: "kubernetes_cluster" +# controller_external_label: "kubernetes_controller" +# product_external_label: "kubernetes_label_app" +# service_external_label: "kubernetes_service" +# deployment_external_label: "kubernetes_deployment" +# team_external_label: "kubernetes_label_team" +# environment_external_label: "kubernetes_label_env" +# department_external_label: "kubernetes_label_department" +# statefulset_external_label: "kubernetes_statefulset" +# daemonset_external_label: "kubernetes_daemonset" +# pod_external_label: "kubernetes_pod" +# grafanaURL: "" +# clusterName: "" # used for display in Kubecost UI +# currencyCode: "USD" # offical support for USD, CAD, EUR, and CHF +# azureBillingRegion: US # Represents 2-letter region code, e.g. West Europe = NL, Canada = CA. ref: https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes +# azureSubscriptionID: 0bd50fdf-c923-4e1e-850c-196dd3dcc5d3 +# azureClientID: f2ef6f7d-71fb-47c8-b766-8d63a19db017 +# azureTenantID: 72faf3ff-7a3f-4597-b0d9-7b0b201bb23a +# azureClientPassword: fake key # Only use if your values.yaml are stored encrypted. Otherwise provide an existing secret via serviceKeySecretName +# discount: "" # percentage discount applied to compute +# negotiatedDiscount: "" # custom negotiated cloud provider discount +# defaultIdle: false +# serviceKeySecretName: "" # Use an existing AWS or Azure secret with format as in aws-service-key-secret.yaml or azure-service-key-secret.yaml. Leave blank if using createServiceKeySecret +# createServiceKeySecret: true # Creates a secret representing your cloud service key based on data in values.yaml. If you are storing unencrypted values, add a secret manually +# sharedNamespaces: "" # namespaces with shared workloads, example value: "kube-system\,ingress-nginx\,kubecost\,monitoring" +# productKey: # apply business or enterprise product license +# key: "" +# enabled: false +# secretname: productkeysecret # create a secret out of a file named productkey.json of format { "key": "kc-b1325234" } diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/.helmignore b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/Chart.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/Chart.yaml new file mode 100644 index 000000000..3f57d6b38 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: nutanix-csi-storage +apiVersion: v1 +appVersion: 2.3.1 +description: A Helm chart for installing Nutanix CSI Volume Driver +home: https://github.com/nutanix/helm +icon: https://avatars2.githubusercontent.com/u/6165865?s=200&v=4 +keywords: +- Nutanix +- Storage +- Volumes +- Files +- StorageClass +- CentOS +- Ubuntu +kubeVersion: '>= 1.13.0' +maintainers: +- name: tuxtof +name: nutanix-csi-storage +version: 2.3.100 diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/README.md b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/README.md new file mode 100644 index 000000000..97f4c99fc --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/README.md @@ -0,0 +1,93 @@ +# Nutanix CSI Volume Driver Helm chart + +## Introduction + +The Container Storage Interface (CSI) Volume Driver for Kubernetes leverages Nutanix Volumes and Nutanix Files to provide scalable and persistent storage for stateful applications. + +When Files is used for persistent storage, applications on multiple pods can access the same storage, and also have the benefit of multi-pod read and write access. + +## Important notice + +If you plan to update an existing Nutanix CSI deployment from 1.x to 2.x with this chart, you first need to manually deploy the CRD present here: https://github.com/nutanix/csi-plugin/tree/master/deploy/Centos/crd. + +Please note that starting with v2.2.0, the Nutanix CSI driver has changed the driver name format from com.nutanix.csi to csi.nutanix.com. All deployment YAMLs use this new driver name format. However, if you are upgrading the CSI driver, you should continue to use the old driver name com.nutanix.csi by setting the `legacy` parameter to `true`. If not, existing PVCs/PVs will not work with the new driver name. + +## Nutanix CSI driver documentation +https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_3:CSI-Volume-Driver-v2_3 + +## Features list + +- Nutanix CSI Driver v2.3.1 +- Nutanix Volumes support +- Nutanix Files support +- Volume resize support ( beta in Kubernetes >= 1.16.0 ) +- Volume clone ( beta in Kubernetes >= 1.16.0 ) +- Volume snapshot and restore ( beta in Kubernetes >= 1.17.0 ) +- IP Address Whitelisting +- LVM Volume supporting multi vdisks volume group +- NFS dynamic share provisioning +- iSCSI Auto CHAP Authentication +- OS independence + +## Prerequisites + +- Kubernetes 1.13 or later +- Kubernetes worker nodes must have the iSCSI package installed (Nutanix Volumes only) +- This chart has been validated on CentOS 7 and Ubuntu 18.04/20.04, but the new architecture enables easy portability to other distributions.
+ +## Installing the Chart + +To install the chart with the name `nutanix-csi`: + +```console +helm repo add nutanix https://nutanix.github.io/helm/ + +helm install nutanix-csi nutanix/nutanix-csi-storage -n <namespace> +``` + +## Uninstalling the Chart + +To uninstall/delete the `nutanix-csi` deployment: + +```console +helm delete nutanix-csi -n <namespace> +``` + +## Configuration + +The following table lists the configurable parameters of the Nutanix-CSI chart and their default values. + +| Parameter | Description | Default | +|------------------------------|----------------------------------------|--------------------------------| +| `legacy` | use old reverse notation for CSI driver name | `false` | +| `volumeClass` | Activate Nutanix Volumes Storage Class | `true` | +| `fileClass` | Activate Nutanix Files Storage Class | `false` | +| `dynamicFileClass` | Activate Nutanix Dynamic Files Storage Class | `false` | +| `defaultStorageClass` | Choose your default Storage Class (none, volume, file, dynfile) | `none`| +| `prismEndPoint` | Cluster Virtual IP Address |`10.0.0.1`| +| `dataServiceEndPoint` | Prism data service IP |`10.0.0.2`| +| `username` | name used for the admin role (if created) |`admin`| +| `password` | password for the admin role (if created) |`nutanix/4u`| +| `secretName` | name of the secret to use for admin role| `ntnx-secret`| +| `createSecret` | create secret for admin role (if false use existing)| `true`| +| `storageContainer` | Nutanix storage container name | `default`| +| `fsType` | type of file system you are using (ext4, xfs) |`xfs`| +| `fileHost` | NFS server IP address | `10.0.0.3`| +| `filePath` | path of the NFS share |`share`| +| `fileServerName` | name of the Nutanix File Server | `file`| +| `nodeSelector` | add nodeSelector to pods spec | | +| `tolerations` | add tolerations to pods spec | | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install` or provide a file with `-f value.yaml`.
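As an illustration of the parameters listed above, a `value.yaml` passed with `-f` might look like the following sketch; every address and credential here is a hypothetical placeholder, not a recommended default:

```console
cat > value.yaml <<'EOF'
# hypothetical example values -- replace with your own environment
prismEndPoint: 10.10.10.10        # cluster virtual IP address
dataServiceEndPoint: 10.10.10.11  # Prism data services IP
username: admin
password: example-password
storageContainer: default
fsType: xfs
defaultStorageClass: volume
EOF

helm install nutanix-csi nutanix/nutanix-csi-storage -n <namespace> -f value.yaml
```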
+ +Example: + +```console +helm install nutanix-csi nutanix/nutanix-csi-storage --set prismEndPoint=X.X.X.X --set dataServiceEndPoint=Y.Y.Y.Y --set username=admin --set password=xxxxxxxxx --set storageContainer=container_name --set fsType=xfs --set defaultStorageClass=volume --set os=centos +``` + +or + +```console +helm install nutanix-csi nutanix/nutanix-csi-storage -f value.yaml +``` diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/app-readme.md b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/app-readme.md new file mode 100644 index 000000000..bffca7493 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/app-readme.md @@ -0,0 +1 @@ +A Helm chart for installing Nutanix CSI Volume/File Storage Driver diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotclasses.yaml new file mode 100644 index 000000000..4aa980cc7 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotclasses.yaml @@ -0,0 +1,85 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .driver + name: Driver + type: string + - JSONPath: .deletionPolicy + description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass + should be deleted when its bound VolumeSnapshot is deleted. + name: DeletionPolicy + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + preserveUnknownFields: false + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. 
+ type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotcontents.yaml new file mode 100644 index 000000000..34c51ad62 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshotcontents.yaml @@ -0,0 +1,233 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot in bytes + name: RestoreSize + type: integer + - JSONPath: .spec.deletionPolicy + description: Determines whether this VolumeSnapshotContent and its physical snapshot + on the underlying storage system should be deleted when its bound VolumeSnapshot + is deleted. + name: DeletionPolicy + type: string + - JSONPath: .spec.driver + description: Name of the CSI driver used to create the physical snapshot on the + underlying storage system. + name: Driver + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: Name of the VolumeSnapshotClass to which this snapshot belongs. + name: VolumeSnapshotClass + type: string + - JSONPath: .spec.volumeSnapshotRef.name + description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + name: VolumeSnapshot + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken from. This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. 
+ TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. 
+ If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshots.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshots.yaml new file mode 100644 index 000000000..483706f16 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/crds/snapshot.storage.k8s.io_volumesnapshots.yaml @@ -0,0 +1,188 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .spec.source.persistentVolumeClaimName + description: Name of the source PVC from where a dynamically taken snapshot will + be created. + name: SourcePVC + type: string + - JSONPath: .spec.source.volumeSnapshotContentName + description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + name: SourceSnapshotContent + type: string + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot. + name: RestoreSize + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + name: SnapshotClass + type: string + - JSONPath: .status.boundVolumeSnapshotContentName + description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + name: SnapshotContent + type: string + - JSONPath: .status.creationTime + description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. + name: CreationTime + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. 
NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + anyOf: + - type: integer + - type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/questions.yml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/questions.yml new file mode 100644 index 000000000..a8f3d2b16 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/questions.yml @@ -0,0 +1,116 @@ +questions: + - variable: volumeClass + label: "Volumes Storage Class" + type: boolean + default: true + description: "Activate Nutanix Volumes Storage Class" + group: "global Settings" + - variable: fileClass + label: "Files Storage Class" + type: boolean + default: false + description: "Activate Nutanix Files Storage Class" + group: "global Settings" + - variable: dynamicFileClass + label: "Dynamic Files Storage Class" + type: boolean + default: false + description: "Activate Nutanix Files Storage Class with dynamic share provisioning" + group: "global Settings" + - variable: legacy + label: "Driver Name Legacy mode" + type: boolean + default: false + description: "Set to True to continue to use old driver name in case of initial install with chart < 2.2.0" + group: "global Settings" + - variable: defaultStorageClass + label: "Default Storage Class" + type: enum + default: "none" + options: ["none", "volume", "file", "dynfile"] + description: "Select the default Storage Class you want" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true||fileClass=true" + + - variable: prismEndPoint + label: "Prism Endpoint" + type: string + required: true + description: "Please specify the cluster virtual address" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + - variable: username + label: "Username" + type: string + required: true + 
description: "Specify username with cluster admin permission" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + - variable: password + label: "Password" + type: password + required: true + description: "Specify password of the user" + group: "global Settings" + show_if: "volumeClass=true||dynamicFileClass=true" + + - variable: lvmVolume + label: "LVM Volume" + type: boolean + default: false + description: "Activate LVM to support multi vdisks volume group for PV" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: dataServiceEndPoint + label: "Data Service Endpoint" + type: string + required: true + description: "Please specify the ISCSI data services address" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: storageContainer + label: "Storage Container" + type: string + required: true + description: "Specify Nutanix container name where the Persistent Volume will be stored" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: fsType + label: "Filesystem" + type: enum + options: ["xfs", "ext4"] + description: "Select the filesystem for the Persistent Volume" + group: "Nutanix Volumes Settings" + show_if: "volumeClass=true" + - variable: lvmDisks + label: "LVM Disks" + type: int + required: true + default: "4" + min: 1 + max: 8 + description: "Number of vdisk for each PV" + group: "Nutanix Volumes Settings" + show_if: "lvmVolume=true&&volumeClass=true" + + - variable: fileHost + label: "File Server Address" + type: string + required: true + description: "Specify Nutanix Files address" + group: "Nutanix Files Settings" + show_if: "fileClass=true" + - variable: filePath + label: "Export share" + type: string + required: true + description: "Specify Nutanix Files share path" + group: "Nutanix Files Settings" + show_if: "fileClass=true" + - variable: fileServerName + label: "NFS File Server Name" + type: string + required: true + description: "Specify Nutanix Files server name" + group: "Nutanix Files Settings" + show_if: "dynamicFileClass=true" diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/NOTES.txt b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/NOTES.txt new file mode 100644 index 000000000..3921d167a --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/NOTES.txt @@ -0,0 +1,3 @@ +Driver name: {{ include "nutanix-csi-storage.drivername" . }} + +Nutanix CSI provider was deployed in namespace {{ .Release.Namespace }} diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/_helpers.tpl b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/_helpers.tpl new file mode 100644 index 000000000..5fe53b26a --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nutanix-csi-storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "nutanix-csi-storage.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nutanix-csi-storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create CSI driver name. +*/}} +{{- define "nutanix-csi-storage.drivername" -}} +{{- if .Values.legacy -}} +com.nutanix.csi +{{- else -}} +csi.nutanix.com +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/csi-driver.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/csi-driver.yaml new file mode 100644 index 000000000..fa56a6a1c --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/csi-driver.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: {{ include "nutanix-csi-storage.drivername" . }} +spec: + attachRequired: false + podInfoOnMount: true \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-cs-scc.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-cs-scc.yaml new file mode 100644 index 000000000..89a543a54 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-cs-scc.yaml @@ -0,0 +1,30 @@ +{{- if eq .Values.os "openshift4"}} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: ntnx-csi-scc +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: [] +defaultAddCapabilities: [] +fsGroup: + type: RunAsAny +groups: [] +priority: +readOnlyRootFilesystem: false +requiredDropCapabilities: [] +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:{{ .Release.Namespace }}:csi-provisioner + - system:serviceaccount:{{ .Release.Namespace }}:csi-node-ntnx-plugin +{{- end}} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-node.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-node.yaml new file mode 100644 index 000000000..59519e9f0 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-node.yaml @@ -0,0 +1,128 @@ +# Copyright 2019 Nutanix Inc +# +# example usage: kubectl create -f + +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: csi-node-ntnx-plugin + template: + metadata: + labels: + app: csi-node-ntnx-plugin + spec: + serviceAccount: csi-node-ntnx-plugin + hostNetwork: true + containers: + - name: driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: 
DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/{{ include "nutanix-csi-storage.drivername" . }}/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi/ + - name: registration-dir + mountPath: /registration + - name: csi-node-ntnx-plugin + securityContext: + privileged: true + allowPrivilegeEscalation: true + image: quay.io/karbon/ntnx-csi:v2.3.1 + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(NODE_ID)" + - "--drivername={{ include "nutanix-csi-storage.drivername" . }}" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + mountPath: /var/lib/kubelet + # needed so that any mounts setup inside this container are + # propagated back to the host machine. + mountPropagation: "Bidirectional" + - mountPath: /dev + name: device-dir + - mountPath: /etc/iscsi + name: iscsi-dir + - mountPath: /host + name: root-dir + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + failureThreshold: 3 + - name: liveness-probe + imagePullPolicy: Always + volumeMounts: + - mountPath: /csi + name: plugin-dir + image: quay.io/k8scsi/livenessprobe:v1.1.0 + args: + - --csi-address=/csi/csi.sock + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/{{ include "nutanix-csi-storage.drivername" . }}/ + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: device-dir + hostPath: + path: /dev + - name: iscsi-dir + hostPath: + path: /etc/iscsi + type: Directory + - name: root-dir + hostPath: + path: / + type: Directory diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-provisioner.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-provisioner.yaml new file mode 100644 index 000000000..5de6f7a29 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-provisioner.yaml @@ -0,0 +1,111 @@ +# Copyright 2019 Nutanix Inc +# +# example usage: kubectl create -f + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-provisioner-ntnx-plugin + namespace: {{ .Release.Namespace }} +spec: + serviceName: csi-provisioner-ntnx-plugin + replicas: 1 + selector: + matchLabels: + app: csi-provisioner-ntnx-plugin + template: + metadata: + labels: + app: csi-provisioner-ntnx-plugin + spec: + serviceAccount: csi-provisioner + containers: + - name: csi-provisioner + image: quay.io/k8scsi/csi-provisioner:v1.5.0 + args: + - --provisioner={{ include "nutanix-csi-storage.drivername" . 
}} + - --csi-address=$(ADDRESS) + - --timeout=60s + - --v=5 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: IfNotPresent + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v0.3.0 + args: + - --v=5 + - --csi-address=$(ADDRESS) + - --leader-election=false + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: IfNotPresent + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-snapshotter + image: quay.io/k8scsi/csi-snapshotter:v2.1.0 + args: + - --csi-address=$(ADDRESS) + - --leader-election=false + - --logtostderr=true + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: ntnx-csi-plugin + image: quay.io/karbon/ntnx-csi:v2.3.1 + securityContext: + allowPrivilegeEscalation: true + privileged: true + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(NODE_ID)" + - "--drivername={{ include "nutanix-csi-storage.drivername" . }}" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + failureThreshold: 3 + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: quay.io/k8scsi/livenessprobe:v1.1.0 + args: + - --csi-address=/csi/csi.sock + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - emptyDir: {} + name: socket-dir diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-rbac.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-rbac.yaml new file mode 100644 index 000000000..1b79eed11 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-csi-rbac.yaml @@ -0,0 +1,124 @@ +# Copyright 2018 Nutanix Inc +# +# Configuration to deploy the Nutanix CSI driver +# +# example usage: kubectl create -f + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-provisioner + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-provisioner-runner + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: csi-provisioner + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +# needed for StatefulSet +kind: Service +apiVersion: v1 +metadata: + name: csi-provisioner-ntnx-plugin + namespace: {{ .Release.Namespace }} + labels: + app: csi-provisioner-ntnx-plugin +spec: + selector: + app: csi-provisioner-ntnx-plugin + ports: + - name: dummy + port: 12345 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-node-runner + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-node-role + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + 
name: csi-node-ntnx-plugin + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: csi-node-runner + apiGroup: rbac.authorization.k8s.io + diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-secret.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-secret.yaml new file mode 100644 index 000000000..dbf22bdfc --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/ntnx-secret.yaml @@ -0,0 +1,11 @@ +{{- if eq .Values.createSecret true }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.secretName }} + namespace: {{ .Release.Namespace }} +data: + # base64 encoded prism-ip:prism-port:admin:password. + # E.g.: echo -n "10.83.0.91:9440:admin:mypassword" | base64 + key: {{ printf "%s:9440:%s:%s" .Values.prismEndPoint .Values.username .Values.password | b64enc}} +{{- end }} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/sc.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/sc.yaml new file mode 100644 index 000000000..b7ba803e2 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/sc.yaml @@ -0,0 +1,73 @@ +{{- if eq .Values.volumeClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: nutanix-volume +{{- if eq .Values.defaultStorageClass "volume" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixVolumes + csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/node-publish-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/node-publish-secret-namespace: {{ .Release.Namespace }} + csi.storage.k8s.io/controller-expand-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/controller-expand-secret-namespace: {{ .Release.Namespace }} + dataServiceEndPoint: {{ .Values.dataServiceEndPoint }} + storageContainer: {{ .Values.storageContainer }} + csi.storage.k8s.io/fstype: {{ .Values.fsType }} +{{- if eq .Values.lvmVolume true }} + isLVMVolume: "true" + numLVMDisks: {{ quote .Values.lvmDisks }} +{{- end }} +allowVolumeExpansion: true +reclaimPolicy: Delete +--- +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: nutanix-snapshot-class +driver: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixVolumes + csi.storage.k8s.io/snapshotter-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/snapshotter-secret-namespace: {{ .Release.Namespace }} +deletionPolicy: Delete +{{- end }} +--- +{{- if eq .Values.fileClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: nutanix-file +{{- if eq .Values.defaultStorageClass "file" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . 
}} +parameters: + storageType: NutanixFiles + nfsServer: {{ .Values.fileHost }} + nfsPath: {{ .Values.filePath }} +{{- end }} +--- +{{- if eq .Values.dynamicFileClass true }} +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: nutanix-dynamicfile +{{- if eq .Values.defaultStorageClass "dynfile" }} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{{- end }} +provisioner: {{ include "nutanix-csi-storage.drivername" . }} +parameters: + storageType: NutanixFiles + dynamicProv: ENABLED + nfsServerName: {{ .Values.fileServerName }} + csi.storage.k8s.io/provisioner-secret-name: {{ .Values.secretName }} + csi.storage.k8s.io/provisioner-secret-namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-rbac.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-rbac.yaml new file mode 100644 index 000000000..eb6bb088c --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-rbac.yaml @@ -0,0 +1,78 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: snapshot-controller + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-runner + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-role + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: snapshot-controller-leaderelection + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: snapshot-controller + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io + diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-setup.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-setup.yaml new file mode 100644 index 
000000000..57cf273ae --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/templates/snapshot-controller-setup.yaml @@ -0,0 +1,24 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: snapshot-controller + namespace: {{ .Release.Namespace }} +spec: + serviceName: "snapshot-controller" + replicas: 1 + selector: + matchLabels: + app: snapshot-controller + template: + metadata: + labels: + app: snapshot-controller + spec: + serviceAccount: snapshot-controller + containers: + - name: snapshot-controller + image: quay.io/k8scsi/snapshot-controller:v2.0.1 + args: + - "--v=5" + - "--leader-election=false" + imagePullPolicy: Always diff --git a/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/values.yaml b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/values.yaml new file mode 100644 index 000000000..b8be1d667 --- /dev/null +++ b/charts/nutanix-csi-storage/nutanix-csi-storage/2.3.100/values.yaml @@ -0,0 +1,67 @@ +# Default values for nutanix-csi-storage. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# parameters + +# Legacy mode +# +# if legacy is set to true we keep the old reverse domain notation for the CSI driver name (com.nutanix.csi). +# needs to be set to true only when upgrading a release initially installed with a chart version before 2.2.x +legacy: false + +# OS settings +# +# Starting with v2.3.1 the CSI driver is OS independent, so this value is deprecated +os: none + + +# Storage Class settings +# +# choose for which mode (Volume, File, Dynamic File) a StorageClass needs to be created +volumeClass: true +fileClass: false +dynamicFileClass: false + + +# Default Storage Class settings +# +# Decide which StorageClass will be the default +# values are: none, volume, file, dynfile +defaultStorageClass: none + +# Nutanix Prism Elements settings +# +# Allow dynamic creation of Volumes and Fileshare +# needed if volumeClass or dynamicFileClass is set to true + +prismEndPoint: 10.0.0.1 + +username: admin +password: nutanix/4u + +secretName: ntnx-secret + +# Nutanix Prism Elements Existing Secret +# +# if set to false a new secret will not be created +createSecret: true + + +# Volumes Settings +# +dataServiceEndPoint: 10.0.0.2 +storageContainer: default +fsType: xfs + +lvmVolume: false +lvmDisks: 4 + +# Files Settings +# +fileHost: 10.0.0.3 +filePath: share + +# Dynamic Files Settings +# +fileServerName: file diff --git a/charts/openebs/openebs/1.12.300/Chart.yaml b/charts/openebs/openebs/1.12.300/Chart.yaml new file mode 100644 index 000000000..bd1066ace --- /dev/null +++ b/charts/openebs/openebs/1.12.300/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: openebs +apiVersion: v1 +appVersion: 1.12.0 +description: Containerized Storage for Containers +home: http://www.openebs.io/ +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png +keywords: +- cloud-native-storage +- block-storage +- iSCSI +- storage +maintainers: +- email: kiran.mova@openebs.io + name: kmova +- email: prateek.pandey@openebs.io + name: prateekpandey14 +name: openebs +sources: +- https://github.com/openebs/openebs +version: 1.12.300 diff --git a/charts/openebs/openebs/1.12.300/OWNERS b/charts/openebs/openebs/1.12.300/OWNERS new file mode 100644 index 000000000..874423e12 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/OWNERS @@ -0,0 +1,6 @@ +approvers: +- kmova +- prateekpandey14 +reviewers: +- kmova +- prateekpandey14 diff --git
a/charts/openebs/openebs/1.12.300/README.md b/charts/openebs/openebs/1.12.300/README.md new file mode 100644 index 000000000..a97908152 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/README.md @@ -0,0 +1,135 @@ +# OpenEBS Helm Chart + +[OpenEBS](https://github.com/openebs/openebs) is an *open source storage platform* that provides persistent and containerized block storage for DevOps and container environments. +OpenEBS provides multiple storage engines that can be plugged in easily. A common pattern is the use of OpenEBS to deliver Dynamic LocalPV for those applications and workloads that want to access disks and cloud volumes directly. + +OpenEBS can be deployed on any Kubernetes cluster - either in cloud, on-premise or developer laptop (minikube). OpenEBS itself is deployed as just another container on your cluster, and enables storage services that can be designated on a per pod, application, cluster or container level. + +## Introduction + +This chart bootstraps OpenEBS deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Quickstart and documentation + +You can run OpenEBS on any Kubernetes 1.13+ cluster in a matter of seconds. See the [Quickstart Guide to OpenEBS](https://docs.openebs.io/docs/next/quickstart.html) for detailed instructions. + +For more comprehensive documentation, start with the [Welcome to OpenEBS](https://docs.openebs.io/docs/next/overview.html) docs. + +## Prerequisites + +- Kubernetes 1.13+ with RBAC enabled +- iSCSI PV support in the underlying infrastructure + +## Adding OpenEBS Helm repository + +Before installing OpenEBS Helm charts, you need to add the [OpenEBS Helm repository](https://openebs.github.io/charts) to your Helm client. + +```bash +helm repo add openebs https://openebs.github.io/charts +``` + +## Installing OpenEBS + +```bash +helm install --namespace openebs openebs/openebs +``` + +## Installing OpenEBS with the release name + +```bash +helm install --name `my-release` --namespace openebs openebs/openebs +``` + +## To uninstall/delete instance with release name + +```bash +helm ls --all +helm delete `my-release` +``` + +## Configuration + +The following table lists the configurable parameters of the OpenEBS chart and their default values. 
+ +| Parameter | Description | Default | +| ----------------------------------------| --------------------------------------------- | ----------------------------------------- | +| `rbac.create` | Enable RBAC Resources | `true` | +| `rbac.pspEnabled` | Create pod security policy resources | `false` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Specify which docker registry to use | `""` | +| `apiserver.enabled` | Enable API Server | `true` | +| `apiserver.image` | Image for API Server | `openebs/m-apiserver` | +| `apiserver.imageTag` | Image Tag for API Server | `1.12.0` | +| `apiserver.replicas` | Number of API Server Replicas | `1` | +| `apiserver.sparse.enabled` | Create Sparse Pool based on Sparsefile | `false` | +| `provisioner.enabled` | Enable Provisioner | `true` | +| `provisioner.image` | Image for Provisioner | `openebs/openebs-k8s-provisioner` | +| `provisioner.imageTag` | Image Tag for Provisioner | `1.12.0` | +| `provisioner.replicas` | Number of Provisioner Replicas | `1` | +| `localprovisioner.enabled` | Enable localProvisioner | `true` | +| `localprovisioner.image` | Image for localProvisioner | `openebs/provisioner-localpv` | +| `localprovisioner.imageTag` | Image Tag for localProvisioner | `1.12.0` | +| `localprovisioner.replicas` | Number of localProvisioner Replicas | `1` | +| `localprovisioner.basePath` | BasePath for hostPath volumes on Nodes | `/var/openebs/local` | +| `webhook.enabled` | Enable admission server | `true` | +| `webhook.image` | Image for admission server | `openebs/admission-server` | +| `webhook.imageTag` | Image Tag for admission server | `1.12.0` | +| `webhook.replicas` | Number of admission server Replicas | `1` | +| `webhook.hostNetwork` | Use hostNetwork in admission server | `false` | +| `snapshotOperator.enabled` | Enable Snapshot Provisioner | `true` | +| `snapshotOperator.provisioner.image` | Image for Snapshot Provisioner | `openebs/snapshot-provisioner` | +| `snapshotOperator.provisioner.imageTag` | Image Tag for Snapshot Provisioner | `1.12.0` | +| `snapshotOperator.controller.image` | Image for Snapshot Controller | `openebs/snapshot-controller` | +| `snapshotOperator.controller.imageTag` | Image Tag for Snapshot Controller | `1.12.0` | +| `snapshotOperator.replicas` | Number of Snapshot Operator Replicas | `1` | +| `ndm.enabled` | Enable Node Disk Manager | `true` | +| `ndm.image` | Image for Node Disk Manager | `openebs/node-disk-manager` | +| `ndm.imageTag` | Image Tag for Node Disk Manager | `0.7.0` | +| `ndm.sparse.path` | Directory where Sparse files are created | `/var/openebs/sparse` | +| `ndm.sparse.size` | Size of the sparse file in bytes | `10737418240` | +| `ndm.sparse.count` | Number of sparse files to be created | `0` | +| `ndm.filters.enableOsDiskExcludeFilter` | Enable filters of OS disk exclude | `true` | +| `ndm.filters.enableVendorFilter` | Enable filters of venders | `true` | +| `ndm.filters.excludeVendors` | Exclude devices with specified vendor | `CLOUDBYT,OpenEBS` | +| `ndm.filters.enablePathFilter` | Enable filters of paths | `true` | +| `ndm.filters.includePaths` | Include devices with specified path patterns | `""` | +| `ndm.filters.excludePaths` | Exclude devices with specified path patterns | `loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd`| +| `ndm.probes.enableSeachest` | Enable Seachest probe for NDM | `false` | +| `ndmOperator.enabled` | Enable NDM Operator | `true` | +| `ndmOperator.image` | Image for NDM Operator | `openebs/node-disk-operator` | +| 
`ndmOperator.imageTag` | Image Tag for NDM Operator | `0.7.0` | +| `jiva.image` | Image for Jiva | `openebs/jiva` | +| `jiva.imageTag` | Image Tag for Jiva | `1.12.0` | +| `jiva.replicas` | Number of Jiva Replicas | `3` | +| `jiva.defaultStoragePath` | hostpath used by default Jiva StorageClass | `/var/openebs` | +| `cstor.pool.image` | Image for cStor Pool | `openebs/cstor-pool` | +| `cstor.pool.imageTag` | Image Tag for cStor Pool | `1.12.0` | +| `cstor.poolMgmt.image` | Image for cStor Pool Management | `openebs/cstor-pool-mgmt` | +| `cstor.poolMgmt.imageTag` | Image Tag for cStor Pool Management | `1.12.0` | +| `cstor.target.image` | Image for cStor Target | `openebs/cstor-istgt` | +| `cstor.target.imageTag` | Image Tag for cStor Target | `1.12.0` | +| `cstor.volumeMgmt.image` | Image for cStor Volume Management | `openebs/cstor-volume-mgmt` | +| `cstor.volumeMgmt.imageTag` | Image Tag for cStor Volume Management | `1.12.0` | +| `helper.image` | Image for helper | `openebs/linux-utils` | +| `helper.imageTag` | Image Tag for helper | `1.12.0` | +| `featureGates.enabled` | Enable feature gates for OpenEBS | `false` | +| `featureGates.GPTBasedUUID.enabled` | Enable GPT based UUID generation in NDM | `false` | +| `crd.enableInstall` | Enable installation of CRDs by OpenEBS | `true` | +| `policies.monitoring.image` | Image for Prometheus Exporter | `openebs/m-exporter` | +| `policies.monitoring.imageTag` | Image Tag for Prometheus Exporter | `1.12.0` | +| `analytics.enabled` | Enable sending stats to Google Analytics | `true` | +| `analytics.pingInterval` | Duration(hours) between sending ping stat | `24h` | +| `defaultStorageConfig.enabled` | Enable default storage class installation | `true` | +| `varDirectoryPath.baseDir` | To store debug info of OpenEBS containers | `/var/openebs` | +| `healthCheck.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `healthCheck.periodSeconds` | How often to perform the liveness probe | `60` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +helm install --name openebs -f values.yaml openebs/openebs +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/charts/openebs/openebs/1.12.300/app-readme.md b/charts/openebs/openebs/1.12.300/app-readme.md new file mode 100644 index 000000000..e6a3d5f48 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/app-readme.md @@ -0,0 +1,10 @@ +# OpenEBS + +OpenEBS is an open source storage platform that provides persistent container attached, cloud-native block storage for DevOps and for Kubernetes environments. + +OpenEBS allows you to treat your persistent workload containers, such as DBs on containers, just like other containers. OpenEBS itself is deployed as just another container on your host and enables storage services that can be designated on a per pod, application, cluster or container level, including: +- Data persistence across nodes, dramatically reducing time spent rebuilding Cassandra rings for example. +- Synchronization of data across availability zones and cloud providers. +- Use of commodity hardware plus a container engine to deliver so called container attached block storage. +- Integration with Kubernetes, so developer and application intent flows into OpenEBS configurations automatically. +- Management of tiering to and from S3 and other targets. 
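As a worked illustration of the `--set key=value` override syntax described in the Configuration section of the README above: the parameter names below come from the configuration table, but the specific values are only an example, not a recommendation.

```bash
# Illustrative only: install the chart with two documented parameters overridden.
# Helm 2 syntax (--name) is assumed, matching the commands used elsewhere in the README.
# Commas inside a --set value must be escaped with a backslash.
helm install --name openebs --namespace openebs \
  --set analytics.enabled=false \
  --set ndm.filters.excludeVendors="CLOUDBYT\,OpenEBS" \
  openebs/openebs
```

Repeated `--set` flags and a custom values file passed with `-f` can be combined; values given with `--set` take precedence over those in the file.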
diff --git a/charts/openebs/openebs/1.12.300/questions.yml b/charts/openebs/openebs/1.12.300/questions.yml new file mode 100644 index 000000000..c0d4641cb --- /dev/null +++ b/charts/openebs/openebs/1.12.300/questions.yml @@ -0,0 +1,200 @@ +questions: +- variable: defaultImage + default: true + description: "Use default OpenEBS images" + label: Use Default Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: apiserver.image + default: "openebs/m-apiserver" + description: "Default API Server image for OpenEBS" + type: string + label: API Server Image + - variable: apiserver.imageTag + default: "1.12.0" + description: "The image tag of API Server image" + type: string + label: Image Tag For OpenEBS API Server Image + - variable: provisioner.image + default: "openebs/openebs-k8s-provisioner" + description: "Default K8s Provisioner image for OpenEBS" + type: string + label: Provisioner Image + - variable: provisioner.imageTag + default: "1.12.0" + description: "The image tag of Provisioner image" + type: string + label: Image Tag For Provisioner Image + - variable: snapshotOperator.controller.image + default: "openebs/snapshot-controller" + description: "Default Snapshot Controller image for OpenEBS" + type: string + label: Snapshot Controller Image + - variable: snapshotOperator.controller.imageTag + default: "1.12.0" + description: "The image tag of Snapshot Controller image" + type: string + label: Image Tag For OpenEBS Snapshot Controller Image + - variable: snapshotOperator.provisioner.image + default: "openebs/snapshot-provisioner" + description: "Default Snapshot Provisioner image for OpenEBS" + type: string + label: Snapshot Provisioner Image + - variable: snapshotOperator.provisioner.imageTag + default: "1.12.0" + description: "The image tag of Snapshot Provisioner image" + type: string + label: Image Tag For OpenEBS Snapshot Provisioner Image + - variable: ndm.image + default: "openebs/node-disk-manager" + description: "Default NDM image" + type: string + label: Node Disk Manager Image + - variable: ndm.imageTag + default: "0.7.0" + description: "The image tag of NDM image" + type: string + label: Image Tag For Node Disk Manager Image + - variable: ndmOperator.image + default: "openebs/node-disk-operator" + description: "Default NDO image" + type: string + label: Node Disk Operator Image + - variable: ndmOperator.imageTag + default: "0.7.0" + description: "The image tag of NDO image" + type: string + label: Image Tag For Node Disk Manager Image + - variable: jiva.image + default: "openebs/jiva" + description: "Default Jiva Storage Engine image for OpenEBS" + type: string + label: Jiva Storage Engine Image + - variable: jiva.imageTag + default: "1.12.0" + description: "The image tag of Jiva image" + type: string + label: Image Tag For OpenEBS Jiva Storage Engine Image + - variable: cstor.pool.image + default: "openebs/cstor-pool" + description: "Default cStor Storage Engine Pool image for OpenEBS" + type: string + label: cStor Storage Engine Pool Image + - variable: cstor.pool.imageTag + default: "1.12.0" + description: "The image tag of cStor Storage Engine Pool image" + type: string + label: Image Tag For OpenEBS cStor Storage Engine Pool Image + - variable: cstor.poolMgmt.image + default: "openebs/cstor-pool-mgmt" + description: "Default cStor Storage Engine Pool Management image for OpenEBS" + type: string + label: cStor Storage Engine Pool Management Image + - variable: cstor.poolMgmt.imageTag + default: "1.12.0" + 
description: "The image tag of cStor Storage Engine Pool Management image" + type: string + label: Image Tag For OpenEBS cStor Storage Engine Pool Management Image + - variable: cstor.target.image + default: "openebs/cstor-istgt" + description: "Default cStor Storage Engine Target image for OpenEBS" + type: string + label: cStor Storage Engine Target Image + - variable: cstor.target.imageTag + default: "1.12.0" + description: "The image tag of cStor Storage Engine Target image" + type: string + label: Image Tag For OpenEBS cStor Storage Engine Target Image + - variable: cstor.volumeMgmt.image + default: "openebs/cstor-volume-mgmt" + description: "Default cStor Storage Engine Target Management image for OpenEBS" + type: string + label: cStor Storage Engine Target Management Image + - variable: cstor.volumeMgmt.imageTag + default: "1.12.0" + description: "The image tag of cStor Storage Engine Target Management image" + type: string + label: Image Tag For OpenEBS cStor Storage Engine Target Management Image + - variable: policies.monitoring.image + default: "openebs/m-exporter" + description: "Default OpeneEBS Volume and pool Exporter image" + type: string + label: Monitoring Exporter Image + show_if: "policies.monitoring.enabled=true&&defaultImage=false" + - variable: policies.monitoring.imageTag + default: "1.12.0" + description: "The image tag of OpenEBS Exporter" + type: string + label: Image Tag For OpenEBS Exporter Image + show_if: "policies.monitoring.enabled=true&&defaultImage=false" +- variable: ndm.filters.excludeVendors + default: 'CLOUDBYT,OpenEBS' + type: string + description: "Configure NDM to filter disks from following vendors" + label: Filter Disks belonging to vendors + group: "NDM Disk Filter by Vendor " +- variable: ndm.filters.excludePaths + default: 'loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md' + type: string + description: "Configure NDM to filter disks from following paths" + label: Filter Disks belonging to paths + group: "NDM Disk Filter by Path" +- variable: ndm.sparse.enabled + default: true + description: "Create a cStor Pool on Sparse Disks" + label: Create cStor Pool on Sprase Disks + type: boolean + show_subquestion_if: true + group: "NDM Sparse Disk Settings" + subquestions: + - variable: ndm.sparse.size + default: "10737418240" + description: "Default Size of Sparse Disk" + type: string + label: Sparse Disk Size in bytes + - variable: ndm.sparse.count + default: "0" + description: "Number of Sparse Disks" + type: string + label: Number of Sparse Disks + - variable: ndm.sparse.path + default: "/var/openebs/sparse" + description: "Directory where Sparse Disks should be created" + type: string + label: Directory for Sparse Disks +- variable: defaultPorts + default: true + description: "Use default Communication Ports" + label: Use Default Ports + type: boolean + show_subquestion_if: false + group: "Communication Ports" + subquestions: + - variable: apiserver.ports.externalPort + default: 5656 + description: "Default External Port for OpenEBS API Server" + type: int + min: 0 + max: 9999 + label: OpenEBS API Server External Port + - variable: apiserver.ports.internalPort + default: 5656 + description: "Default Internal Port for OpenEBS API Server" + type: int + min: 0 + max: 9999 + label: OpenEBS API Server Internal Port +- variable: policies.monitoring.enabled + default: true + description: "Enable prometheus monitoring" + type: boolean + label: Enable Prometheus Monitoring + group: "Monitoring Settings" +- variable: analytics.enabled + default: true + description: 
"Enable sending anonymous statistics to OpenEBS Google Analytics" + type: boolean + label: Enable updating OpenEBS with usage details + group: "Anonymous Analytics" diff --git a/charts/openebs/openebs/1.12.300/templates/NOTES.txt b/charts/openebs/openebs/1.12.300/templates/NOTES.txt new file mode 100644 index 000000000..299a52638 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/NOTES.txt @@ -0,0 +1,27 @@ +The OpenEBS has been installed. Check its status by running: +$ kubectl get pods -n {{ .Release.Namespace }} + +For dynamically creating OpenEBS Volumes, you can either create a new StorageClass or +use one of the default storage classes provided by OpenEBS. + +Use `kubectl get sc` to see the list of installed OpenEBS StorageClasses. A sample +PVC spec using `openebs-jiva-default` StorageClass is given below:" + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demo-vol-claim +spec: + storageClassName: openebs-jiva-default + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5G +--- + +Please note that, OpenEBS uses iSCSI for connecting applications with the +OpenEBS Volumes and your nodes should have the iSCSI initiator installed. + +For more information, visit our Slack at https://openebs.io/community or view the documentation online at http://docs.openebs.io/. diff --git a/charts/openebs/openebs/1.12.300/templates/_helpers.tpl b/charts/openebs/openebs/1.12.300/templates/_helpers.tpl new file mode 100644 index 000000000..09c63c5a4 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "openebs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "openebs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "openebs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "openebs.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "openebs.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/charts/openebs/openebs/1.12.300/templates/clusterrole.yaml b/charts/openebs/openebs/1.12.300/templates/clusterrole.yaml new file mode 100644 index 000000000..3a8d3ced8 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/clusterrole.yaml @@ -0,0 +1,50 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "openebs.fullname" . }} + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: ["*"] + resources: ["nodes", "nodes/proxy"] + verbs: ["*"] +- apiGroups: ["*"] + resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs" ] + verbs: ["*"] +- apiGroups: ["*"] + resources: ["statefulsets", "daemonsets"] + verbs: ["*"] +- apiGroups: ["*"] + resources: ["resourcequotas", "limitranges"] + verbs: ["list", "watch"] +- apiGroups: ["*"] + resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "poddisruptionbudgets", "certificatesigningrequests"] + verbs: ["list", "watch"] +- apiGroups: ["*"] + resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"] + verbs: ["*"] +- apiGroups: ["volumesnapshot.external-storage.k8s.io"] + resources: ["volumesnapshots", "volumesnapshotdatas"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: [ "get", "list", "create", "update", "delete", "patch"] +- apiGroups: ["openebs.io"] + resources: [ "*"] + verbs: ["*" ] +- apiGroups: ["cstor.openebs.io"] + resources: [ "*"] + verbs: ["*" ] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "create", "list", "delete", "update", "patch"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/clusterrolebinding.yaml b/charts/openebs/openebs/1.12.300/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..0ada25cd6 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "openebs.fullname" . }} + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "openebs.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "openebs.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/cm-node-disk-manager.yaml b/charts/openebs/openebs/1.12.300/templates/cm-node-disk-manager.yaml new file mode 100644 index 000000000..165eabb50 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/cm-node-disk-manager.yaml @@ -0,0 +1,46 @@ +{{- if .Values.ndm.enabled }} +# This is the node-disk-manager related config. +# It can be used to customize the disks probes and filters +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "openebs.fullname" . }}-ndm-config + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: ndm-config + openebs.io/component-name: ndm-config +data: + # udev-probe is default or primary probe which should be enabled to run ndm + # filterconfigs contains configs of filters - in the form of include + # and exclude comma separated strings + node-disk-manager.config: | + probeconfigs: + - key: udev-probe + name: udev probe + state: true + - key: seachest-probe + name: seachest probe + state: {{ .Values.ndm.probes.enableSeachest }} + - key: smart-probe + name: smart probe + state: true + filterconfigs: + - key: os-disk-exclude-filter + name: os disk exclude filter + state: {{ .Values.ndm.filters.enableOsDiskExcludeFilter }} + exclude: "/,/etc/hosts,/boot" + - key: vendor-filter + name: vendor filter + state: {{ .Values.ndm.filters.enableVendorFilter }} + include: "" + exclude: "{{ .Values.ndm.filters.excludeVendors }}" + - key: path-filter + name: path filter + state: {{ .Values.ndm.filters.enablePathFilter }} + include: "{{ .Values.ndm.filters.includePaths }}" + exclude: "{{ .Values.ndm.filters.excludePaths }}" +--- +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/daemonset-ndm.yaml b/charts/openebs/openebs/1.12.300/templates/daemonset-ndm.yaml new file mode 100644 index 000000000..2d8aae373 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/daemonset-ndm.yaml @@ -0,0 +1,147 @@ +{{- if .Values.ndm.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "openebs.fullname" . }}-ndm + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: ndm + openebs.io/component-name: ndm + openebs.io/version: {{ .Values.release.version }} +spec: + updateStrategy: + type: "RollingUpdate" + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: ndm + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: ndm + openebs.io/component-name: ndm + name: openebs-ndm + openebs.io/version: {{ .Values.release.version }} + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + hostNetwork: true + containers: + - name: {{ template "openebs.name" . }}-ndm + image: "{{ .Values.image.repository }}{{ .Values.ndm.image }}:{{ .Values.ndm.imageTag }}" + args: + - -v=4 +{{- if .Values.featureGates.enabled }} +{{- if .Values.featureGates.GPTBasedUUID.enabled }} + - --feature-gates={{ .Values.featureGates.GPTBasedUUID.featureGateFlag }} +{{- end}} +{{- end}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + privileged: true + env: + # namespace in which NDM is installed will be passed to NDM Daemonset + # as environment variable + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # pass hostname as env variable using downward API to the NDM container + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{{- if .Values.ndm.sparse }} +{{- if .Values.ndm.sparse.path }} + # specify the directory where the sparse files need to be created. + # if not specified, then sparse files will not be created. + - name: SPARSE_FILE_DIR + value: "{{ .Values.ndm.sparse.path }}" +{{- end }} +{{- if .Values.ndm.sparse.size }} + # Size(bytes) of the sparse file to be created. 
+ - name: SPARSE_FILE_SIZE + value: "{{ .Values.ndm.sparse.size }}" +{{- end }} +{{- if .Values.ndm.sparse.count }} + # Specify the number of sparse files to be created + - name: SPARSE_FILE_COUNT + value: "{{ .Values.ndm.sparse.count }}" +{{- end }} +{{- end }} + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can be used here with pgrep (cmd is < 15 chars). + livenessProbe: + exec: + command: + - pgrep + - "ndm" + initialDelaySeconds: {{ .Values.ndm.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.ndm.healthCheck.periodSeconds }} + volumeMounts: + - name: config + mountPath: /host/node-disk-manager.config + subPath: node-disk-manager.config + readOnly: true + - name: udev + mountPath: /run/udev + - name: procmount + mountPath: /host/proc + readOnly: true + - name: basepath + mountPath: /var/openebs/ndm +{{- if .Values.ndm.sparse }} +{{- if .Values.ndm.sparse.path }} + - name: sparsepath + mountPath: {{ .Values.ndm.sparse.path }} +{{- end }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "openebs.fullname" . }}-ndm-config + - name: udev + hostPath: + path: /run/udev + type: Directory + # mount /proc (to access mount file of process 1 of host) inside container + # to read mount-point of disks and partitions + - name: procmount + hostPath: + path: /proc + type: Directory + - name: basepath + hostPath: + path: "{{ .Values.varDirectoryPath.baseDir }}/ndm" + type: DirectoryOrCreate +{{- if .Values.ndm.sparse }} +{{- if .Values.ndm.sparse.path }} + - name: sparsepath + hostPath: + path: {{ .Values.ndm.sparse.path }} +{{- end }} +{{- end }} + # By default the node-disk-manager will be run on all kubernetes nodes + # If you would like to limit this to only some nodes, say the nodes + # that have storage attached, you could label those node and use + # nodeSelector. + # + # e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node" + # kubectl label node "openebs.io/nodegroup"="storage-node" + #nodeSelector: + # "openebs.io/nodegroup": "storage-node" +{{- if .Values.ndm.nodeSelector }} + nodeSelector: +{{ toYaml .Values.ndm.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.ndm.tolerations }} + tolerations: +{{ toYaml .Values.ndm.tolerations | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-admission-server.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-admission-server.yaml new file mode 100644 index 000000000..a577de02d --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-admission-server.yaml @@ -0,0 +1,76 @@ +{{- if .Values.webhook.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-admission-server + labels: + app: admission-webhook + chart: {{ template "openebs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: admission-webhook + openebs.io/component-name: admission-webhook + openebs.io/version: {{ .Values.release.version }} +spec: + replicas: {{ .Values.webhook.replicas }} + strategy: + type: "Recreate" + rollingUpdate: null + selector: + matchLabels: + app: admission-webhook + template: + metadata: + labels: + app: admission-webhook + name: admission-webhook + release: {{ .Release.Name }} + openebs.io/version: {{ .Values.release.version }} + openebs.io/component-name: admission-webhook + spec: +{{- if .Values.webhook.hostNetwork }} + hostNetwork: true +{{- end }} +{{- if .Values.webhook.nodeSelector }} + nodeSelector: +{{ toYaml .Values.webhook.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.webhook.tolerations }} + tolerations: +{{ toYaml .Values.webhook.tolerations | indent 8 }} +{{- end }} +{{- if .Values.webhook.affinity }} + affinity: +{{ toYaml .Values.webhook.affinity | indent 8 }} +{{- end }} + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: admission-webhook + image: "{{ .Values.image.repository }}{{ .Values.webhook.image }}:{{ .Values.webhook.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - -alsologtostderr + - -v=2 + - 2>&1 + env: + - name: OPENEBS_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ADMISSION_WEBHOOK_FAILURE_POLICY + value: "{{ .Values.webhook.failurePolicy }}" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can't be used here with pgrep (>15 chars).A regular expression + # Anchor `^` : matches any string that starts with `admission-serve` + # `.*`: matche any string that has `admission-serve` followed by zero or more char + # that matches the entire command name has to specified. + livenessProbe: + exec: + command: + - sh + - -c + - test `pgrep -c "^admission-serve.*"` = 1 + initialDelaySeconds: {{ .Values.webhook.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.webhook.healthCheck.periodSeconds }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-local-provisioner.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-local-provisioner.yaml new file mode 100644 index 000000000..ba064a790 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-local-provisioner.yaml @@ -0,0 +1,99 @@ +{{- if .Values.localprovisioner.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-localpv-provisioner + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: {{ .Values.release.version }} +spec: + replicas: {{ .Values.localprovisioner.replicas }} + strategy: + type: "Recreate" + rollingUpdate: null + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: localpv-provisioner + name: openebs-localpv-provisioner + openebs.io/component-name: openebs-localpv-provisioner + openebs.io/version: {{ .Values.release.version }} + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: {{ template "openebs.name" . 
}}-localpv-provisioner + image: "{{ .Values.image.repository }}{{ .Values.localprovisioner.image }}:{{ .Values.localprovisioner.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://10.128.0.12:8080" + # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s + # based on this config. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + # OPENEBS_NAMESPACE is the namespace that this provisioner will + # lookup to find maya api service + - name: OPENEBS_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as + # environment variable + - name: OPENEBS_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # OPENEBS_IO_BASE_PATH is the environment variable that provides the + # default base path on the node where host-path PVs will be provisioned. + - name: OPENEBS_IO_ENABLE_ANALYTICS + value: "{{ .Values.analytics.enabled }}" + - name: OPENEBS_IO_BASE_PATH + value: "{{ .Values.localprovisioner.basePath }}" + - name: OPENEBS_IO_HELPER_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}" + - name: OPENEBS_IO_INSTALLER_TYPE + value: "charts-helm" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can't be used here with pgrep (>15 chars).A regular expression + # that matches the entire command name has to specified. + # Anchor `^` : matches any string that starts with `provisioner-loc` + # `.*`: matches any string that has `provisioner-loc` followed by zero or more char + livenessProbe: + exec: + command: + - sh + - -c + - test `pgrep -c "^provisioner-loc.*"` = 1 + initialDelaySeconds: {{ .Values.localprovisioner.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.localprovisioner.healthCheck.periodSeconds }} +{{- if .Values.localprovisioner.nodeSelector }} + nodeSelector: +{{ toYaml .Values.localprovisioner.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.localprovisioner.tolerations }} + tolerations: +{{ toYaml .Values.localprovisioner.tolerations | indent 8 }} +{{- end }} +{{- if .Values.localprovisioner.affinity }} + affinity: +{{ toYaml .Values.localprovisioner.affinity | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-maya-apiserver.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-maya-apiserver.yaml new file mode 100644 index 000000000..5f4b5884e --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-maya-apiserver.yaml @@ -0,0 +1,165 @@ +{{- if .Values.apiserver.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-apiserver + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: apiserver + name: maya-apiserver + openebs.io/component-name: maya-apiserver + openebs.io/version: {{ .Values.release.version }} +spec: + replicas: {{ .Values.apiserver.replicas }} + strategy: + type: "Recreate" + rollingUpdate: null + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: apiserver + name: maya-apiserver + openebs.io/component-name: maya-apiserver + openebs.io/version: {{ .Values.release.version }} + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: {{ template "openebs.name" . }}-apiserver + image: "{{ .Values.image.repository }}{{ .Values.apiserver.image }}:{{ .Values.apiserver.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.apiserver.ports.internalPort }} + env: + # OPENEBS_IO_KUBE_CONFIG enables maya api service to connect to K8s + # based on this config. This is ignored if empty. + # This is supported for maya api server version 0.5.2 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + # OPENEBS_IO_K8S_MASTER enables maya api service to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for maya api server version 0.5.2 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://172.28.128.3:8080" + # OPENEBS_NAMESPACE provides the namespace of this deployment as an + # environment variable + - name: OPENEBS_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as + # environment variable + - name: OPENEBS_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + # OPENEBS_MAYA_POD_NAME provides the name of this pod as + # environment variable + - name: OPENEBS_MAYA_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # If OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG is false then OpenEBS default + # storageclass and storagepool will not be created. + - name: OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG + value: "{{ .Values.defaultStorageConfig.enabled }}" + # OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL decides whether default cstor sparse pool should be + # configured as a part of openebs installation. + # If "true" a default cstor sparse pool will be configured, if "false" it will not be configured. + # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG + # is set to true + - name: OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL + value: "{{ .Values.apiserver.sparse.enabled }}" + # OPENEBS_IO_CSTOR_TARGET_DIR can be used to specify the hostpath + # to be used for saving the shared content between the side cars + # of cstor volume pod. + # The default path used is /var/openebs/sparse + - name: OPENEBS_IO_CSTOR_TARGET_DIR + value: "{{ .Values.ndm.sparse.path }}" + # OPENEBS_IO_CSTOR_POOL_SPARSE_DIR can be used to specify the hostpath + # to be used for saving the shared content between the side cars + # of cstor pool pod. This ENV is also used to indicate the location + # of the sparse devices. 
+ # The default path used is /var/openebs/sparse + - name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR + value: "{{ .Values.ndm.sparse.path }}" + # OPENEBS_IO_JIVA_POOL_DIR can be used to specify the hostpath + # to be used for default Jiva StoragePool loaded by OpenEBS + # The default path used is /var/openebs + # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG + # is set to true + - name: OPENEBS_IO_JIVA_POOL_DIR + value: "{{ .Values.jiva.defaultStoragePath }}" + # OPENEBS_IO_LOCALPV_HOSTPATH_DIR can be used to specify the hostpath + # to be used for default openebs-hostpath storageclass loaded by OpenEBS + # The default path used is /var/openebs/local + # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG + # is set to true + - name: OPENEBS_IO_LOCALPV_HOSTPATH_DIR + value: "{{ .Values.localprovisioner.basePath }}" + # OPENEBS_IO_BASE_DIR used by the OpenEBS to store debug information and + # so forth that are generated in the course of running OpenEBS containers. + - name: OPENEBS_IO_BASE_DIR + value: "{{ .Values.varDirectoryPath.baseDir }}" + - name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}" + - name: OPENEBS_IO_JIVA_REPLICA_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}" + - name: OPENEBS_IO_JIVA_REPLICA_COUNT + value: "{{ .Values.jiva.replicas }}" + - name: OPENEBS_IO_CSTOR_TARGET_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.cstor.target.image }}:{{ .Values.cstor.target.imageTag }}" + - name: OPENEBS_IO_CSTOR_POOL_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.cstor.pool.image }}:{{ .Values.cstor.pool.imageTag }}" + - name: OPENEBS_IO_CSTOR_POOL_MGMT_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.cstor.poolMgmt.image }}:{{ .Values.cstor.poolMgmt.imageTag }}" + - name: OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.cstor.volumeMgmt.image }}:{{ .Values.cstor.volumeMgmt.imageTag }}" + - name: OPENEBS_IO_VOLUME_MONITOR_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}" + - name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}" + - name: OPENEBS_IO_HELPER_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}" + # OPENEBS_IO_ENABLE_ANALYTICS if set to true sends anonymous usage + # events to Google Analytics + - name: OPENEBS_IO_ENABLE_ANALYTICS + value: "{{ .Values.analytics.enabled }}" + # OPENEBS_IO_ANALYTICS_PING_INTERVAL can be used to specify the duration (in hours) + # for periodic ping events sent to Google Analytics. Default is 24 hours. + - name: OPENEBS_IO_ANALYTICS_PING_INTERVAL + value: "{{ .Values.analytics.pingInterval }}" + - name: OPENEBS_IO_INSTALLER_TYPE + value: "charts-helm" + # OPENEBS_IO_INSTALL_CRD environment variable is used to enable/disable CRD installation + # from Maya API server. 
By default the CRDs will be installed + - name: OPENEBS_IO_INSTALL_CRD + value: "{{ .Values.crd.enableInstall }}" + livenessProbe: + exec: + command: + - /usr/local/bin/mayactl + - version + initialDelaySeconds: {{ .Values.apiserver.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.apiserver.healthCheck.periodSeconds }} +{{- if .Values.apiserver.nodeSelector }} + nodeSelector: +{{ toYaml .Values.apiserver.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.apiserver.tolerations }} + tolerations: +{{ toYaml .Values.apiserver.tolerations | indent 8 }} +{{- end }} +{{- if .Values.apiserver.affinity }} + affinity: +{{ toYaml .Values.apiserver.affinity | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-maya-provisioner.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-maya-provisioner.yaml new file mode 100644 index 000000000..df917018e --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-maya-provisioner.yaml @@ -0,0 +1,98 @@ +{{- if .Values.provisioner.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-provisioner + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: provisioner + name: openebs-provisioner + openebs.io/component-name: openebs-provisioner + openebs.io/version: {{ .Values.release.version }} +spec: + replicas: {{ .Values.provisioner.replicas }} + strategy: + type: "Recreate" + rollingUpdate: null + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: provisioner + name: openebs-provisioner + openebs.io/component-name: openebs-provisioner + openebs.io/version: {{ .Values.release.version }} + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: {{ template "openebs.name" . }}-provisioner + image: "{{ .Values.image.repository }}{{ .Values.provisioner.image }}:{{ .Values.provisioner.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://10.128.0.12:8080" + # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s + # based on this config. This is ignored if empty. + # This is supported for openebs provisioner version 0.5.2 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + # OPENEBS_NAMESPACE is the namespace that this provisioner will + # lookup to find maya api service + - name: OPENEBS_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name, + # that provisioner should forward the volume create/delete requests. + # If not present, "maya-apiserver-service" will be used for lookup. + # This is supported for openebs provisioner version 0.5.3-RC1 onwards + - name: OPENEBS_MAYA_SERVICE_NAME + value: "{{ template "openebs.fullname" . }}-apiservice" + # The following values will be set as annotations to the PV object. 
+ # Refer : https://github.com/openebs/external-storage/pull/15 + #- name: OPENEBS_MONITOR_URL + # value: "{{ .Values.provisioner.monitorUrl }}" + #- name: OPENEBS_MONITOR_VOLKEY + # value: "{{ .Values.provisioner.monitorVolumeKey }}" + #- name: MAYA_PORTAL_URL + # value: "{{ .Values.provisioner.mayaPortalUrl }}" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can't be used here with pgrep (>15 chars).A regular expression + # that matches the entire command name has to specified. + # Anchor `^` : matches any string that starts with `openebs-provis` + # `.*`: matches any string that has `openebs-provis` followed by zero or more char + livenessProbe: + exec: + command: + - sh + - -c + - test `pgrep -c "^openebs-provisi.*"` = 1 + initialDelaySeconds: {{ .Values.provisioner.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.provisioner.healthCheck.periodSeconds }} +{{- if .Values.provisioner.nodeSelector }} + nodeSelector: +{{ toYaml .Values.provisioner.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.provisioner.tolerations }} + tolerations: +{{ toYaml .Values.provisioner.tolerations | indent 8 }} +{{- end }} +{{- if .Values.provisioner.affinity }} + affinity: +{{ toYaml .Values.provisioner.affinity | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-maya-snapshot-operator.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-maya-snapshot-operator.yaml new file mode 100644 index 000000000..707986831 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-maya-snapshot-operator.yaml @@ -0,0 +1,131 @@ +{{- if .Values.snapshotOperator.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-snapshot-operator + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: snapshot-operator + openebs.io/component-name: openebs-snapshot-operator + openebs.io/version: {{ .Values.release.version }} +spec: + replicas: {{ .Values.snapshotOperator.replicas }} + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + strategy: + type: "Recreate" + rollingUpdate: null + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: snapshot-operator + name: openebs-snapshot-operator + openebs.io/version: {{ .Values.release.version }} + openebs.io/component-name: openebs-snapshot-operator + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: {{ template "openebs.name" . }}-snapshot-controller + image: "{{ .Values.image.repository }}{{ .Values.snapshotOperator.controller.image }}:{{ .Values.snapshotOperator.controller.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + # OPENEBS_IO_K8S_MASTER enables openebs snapshot controller to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for openebs snapshot controller version 0.6-RC1 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://10.128.0.12:8080" + # OPENEBS_IO_KUBE_CONFIG enables openebs snapshot controller to connect to K8s + # based on this config. This is ignored if empty. 
+ # This is supported for openebs snapshot controller version 0.6-RC1 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + # OPENEBS_NAMESPACE is the namespace that this snapshot controller will + # lookup to find maya api service + - name: OPENEBS_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name, + # that snapshot controller should forward the volume snapshot requests. + # If not present, "maya-apiserver-service" will be used for lookup. + # This is supported for openebs snapshot controller version 0.6-RC1 onwards + - name: OPENEBS_MAYA_SERVICE_NAME + value: "{{ template "openebs.fullname" . }}-apiservice" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can't be used here with pgrep (>15 chars).A regular expression + # that matches the entire command name has to specified. + # Anchor `^` : matches any string that starts with `snapshot-contro` + # `.*`: matches any string that has `snapshot-contro` followed by zero or more char + livenessProbe: + exec: + command: + - sh + - -c + - test `pgrep -c "^snapshot-contro.*"` = 1 + initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }} + - name: {{ template "openebs.name" . }}-snapshot-provisioner + image: "{{ .Values.image.repository }}{{ .Values.snapshotOperator.provisioner.image }}:{{ .Values.snapshotOperator.provisioner.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + # OPENEBS_IO_K8S_MASTER enables openebs snapshot provisioner to connect to K8s + # based on this address. This is ignored if empty. + # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards + #- name: OPENEBS_IO_K8S_MASTER + # value: "http://10.128.0.12:8080" + # OPENEBS_IO_KUBE_CONFIG enables openebs snapshot provisioner to connect to K8s + # based on this config. This is ignored if empty. + # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards + #- name: OPENEBS_IO_KUBE_CONFIG + # value: "/home/ubuntu/.kube/config" + # OPENEBS_NAMESPACE is the namespace that this snapshot provisioner will + # lookup to find maya api service + - name: OPENEBS_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name, + # that snapshot provisioner should forward the volume snapshot PV requests. + # If not present, "maya-apiserver-service" will be used for lookup. + # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards + - name: OPENEBS_MAYA_SERVICE_NAME + value: "{{ template "openebs.fullname" . }}-apiservice" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can't be used here with pgrep (>15 chars).A regular expression + # that matches the entire command name has to specified. 
+ # Anchor `^` : matches any string that starts with `snapshot-provis` + # `.*`: matches any string that has `snapshot-provis` followed by zero or more char + livenessProbe: + exec: + command: + - sh + - -c + - test `pgrep -c "^snapshot-provis.*"` = 1 + initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }} +{{- if .Values.snapshotOperator.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snapshotOperator.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.snapshotOperator.tolerations }} + tolerations: +{{ toYaml .Values.snapshotOperator.tolerations | indent 8 }} +{{- end }} +{{- if .Values.snapshotOperator.affinity }} + affinity: +{{ toYaml .Values.snapshotOperator.affinity | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/deployment-ndm-operator.yaml b/charts/openebs/openebs/1.12.300/templates/deployment-ndm-operator.yaml new file mode 100644 index 000000000..27e61d5a0 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/deployment-ndm-operator.yaml @@ -0,0 +1,87 @@ +{{- if .Values.ndmOperator.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "openebs.fullname" . }}-ndm-operator + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: ndm-operator + openebs.io/component-name: ndm-operator + openebs.io/version: {{ .Values.release.version }} + name: ndm-operator +spec: + replicas: {{ .Values.ndmOperator.replicas }} + strategy: + type: "Recreate" + rollingUpdate: null + selector: + matchLabels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: ndm-operator + name: ndm-operator + openebs.io/component-name: ndm-operator + openebs.io/version: {{ .Values.release.version }} + spec: + serviceAccountName: {{ template "openebs.serviceAccountName" . }} + containers: + - name: {{ template "openebs.fullname" . }}-ndm-operator + image: "{{ .Values.image.repository }}{{ .Values.ndmOperator.image }}:{{ .Values.ndmOperator.imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + readinessProbe: + exec: + command: + - stat + - /tmp/operator-sdk-ready + initialDelaySeconds: {{ .Values.ndmOperator.readinessCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.ndmOperator.readinessCheck.periodSeconds }} + failureThreshold: {{ .Values.ndmOperator.readinessCheck.failureThreshold }} + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPERATOR_NAME + value: "node-disk-operator" + - name: CLEANUP_JOB_IMAGE + value: "{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}" + # OPENEBS_IO_INSTALL_CRD environment variable is used to enable/disable CRD installation + # from NDM Operator. By default the CRDs will be installed + - name: OPENEBS_IO_INSTALL_CRD + value: "{{ .Values.crd.enableInstall }}" + # Process name used for matching is limited to the 15 characters + # present in the pgrep output. + # So fullname can be used here with pgrep (cmd is < 15 chars). 
+ livenessProbe: + exec: + command: + - pgrep + - "ndo" + initialDelaySeconds: {{ .Values.ndmOperator.healthCheck.initialDelaySeconds }} + periodSeconds: {{ .Values.ndmOperator.healthCheck.periodSeconds }} +{{- if .Values.ndmOperator.nodeSelector }} + nodeSelector: +{{ toYaml .Values.ndmOperator.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.ndmOperator.tolerations }} + tolerations: +{{ toYaml .Values.ndmOperator.tolerations | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/psp-clusterrole.yaml b/charts/openebs/openebs/1.12.300/templates/psp-clusterrole.yaml new file mode 100644 index 000000000..a6c4807dd --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/psp-clusterrole.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "openebs.fullname" . }}-psp + labels: + app: {{ template "openebs.name" . }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "openebs.fullname" . }}-psp +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/psp-clusterrolebinding.yaml b/charts/openebs/openebs/1.12.300/templates/psp-clusterrolebinding.yaml new file mode 100644 index 000000000..5a4205877 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "openebs.fullname" . }}-psp + labels: + app: {{ template "openebs.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "openebs.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ template "openebs.serviceAccountName" . }} + namespace: {{ $.Release.Namespace }} +{{- end }} + diff --git a/charts/openebs/openebs/1.12.300/templates/psp.yaml b/charts/openebs/openebs/1.12.300/templates/psp.yaml new file mode 100644 index 000000000..0442f0e5d --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/psp.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "openebs.fullname" . }}-psp + namespace: {{ $.Release.Namespace }} + labels: + app: {{ template "openebs.name" . }} +spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: ['*'] + volumes: ['*'] + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/service-maya-apiserver.yaml b/charts/openebs/openebs/1.12.300/templates/service-maya-apiserver.yaml new file mode 100644 index 000000000..d44bcb0f8 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/service-maya-apiserver.yaml @@ -0,0 +1,23 @@ +{{- if .Values.apiserver.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "openebs.fullname" . }}-apiservice + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + openebs.io/component-name: maya-apiserver-svc +spec: + ports: + - name: api + port: {{ .Values.apiserver.ports.externalPort }} + targetPort: {{ .Values.apiserver.ports.internalPort }} + protocol: TCP + selector: + app: {{ template "openebs.name" . }} + release: {{ .Release.Name }} + component: apiserver + sessionAffinity: None +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/templates/serviceaccount.yaml b/charts/openebs/openebs/1.12.300/templates/serviceaccount.yaml new file mode 100644 index 000000000..31a500455 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "openebs.serviceAccountName" . }} + labels: + app: {{ template "openebs.name" . }} + chart: {{ template "openebs.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end }} diff --git a/charts/openebs/openebs/1.12.300/values.yaml b/charts/openebs/openebs/1.12.300/values.yaml new file mode 100644 index 000000000..ff2cc72c0 --- /dev/null +++ b/charts/openebs/openebs/1.12.300/values.yaml @@ -0,0 +1,184 @@ +# Default values for openebs. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +rbac: + # Specifies whether RBAC resources should be created + create: true + pspEnabled: false + +serviceAccount: + create: true + name: + +release: + # "openebs.io/version" label for control plane components + version: "1.12.0" + +image: + pullPolicy: IfNotPresent + repository: "" + +apiserver: + enabled: true + image: "openebs/m-apiserver" + imageTag: "1.12.0" + replicas: 1 + ports: + externalPort: 5656 + internalPort: 5656 + sparse: + enabled: "false" + nodeSelector: {} + tolerations: [] + affinity: {} + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + +defaultStorageConfig: + enabled: "true" + +# Directory used by the OpenEBS to store debug information and so forth +# that are generated in the course of running OpenEBS containers. 
+varDirectoryPath: + baseDir: "/var/openebs" + +provisioner: + enabled: true + image: "openebs/openebs-k8s-provisioner" + imageTag: "1.12.0" + replicas: 1 + nodeSelector: {} + tolerations: [] + affinity: {} + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + +localprovisioner: + enabled: true + image: "openebs/provisioner-localpv" + imageTag: "1.12.0" + replicas: 1 + basePath: "/var/openebs/local" + nodeSelector: {} + tolerations: [] + affinity: {} + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + +snapshotOperator: + enabled: true + controller: + image: "openebs/snapshot-controller" + imageTag: "1.12.0" + provisioner: + image: "openebs/snapshot-provisioner" + imageTag: "1.12.0" + replicas: 1 + upgradeStrategy: "Recreate" + nodeSelector: {} + tolerations: [] + affinity: {} + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + +ndm: + enabled: true + image: "openebs/node-disk-manager" + imageTag: "0.7.0" + sparse: + path: "/var/openebs/sparse" + size: "10737418240" + count: "0" + filters: + enableOsDiskExcludeFilter: true + enableVendorFilter: true + excludeVendors: "CLOUDBYT,OpenEBS" + enablePathFilter: true + includePaths: "" + excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd" + probes: + enableSeachest: false + nodeSelector: {} + tolerations: [] + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + +ndmOperator: + enabled: true + image: "openebs/node-disk-operator" + imageTag: "0.7.0" + replicas: 1 + upgradeStrategy: Recreate + nodeSelector: {} + tolerations: [] + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + readinessCheck: + initialDelaySeconds: 4 + periodSeconds: 10 + failureThreshold: 1 + +webhook: + enabled: true + image: "openebs/admission-server" + imageTag: "1.12.0" + failurePolicy: "Fail" + replicas: 1 + healthCheck: + initialDelaySeconds: 30 + periodSeconds: 60 + nodeSelector: {} + tolerations: [] + affinity: {} + hostNetwork: false + +jiva: + image: "openebs/jiva" + imageTag: "1.12.0" + replicas: 3 + defaultStoragePath: "/var/openebs" + +cstor: + pool: + image: "openebs/cstor-pool" + imageTag: "1.12.0" + poolMgmt: + image: "openebs/cstor-pool-mgmt" + imageTag: "1.12.0" + target: + image: "openebs/cstor-istgt" + imageTag: "1.12.0" + volumeMgmt: + image: "openebs/cstor-volume-mgmt" + imageTag: "1.12.0" + +helper: + image: "openebs/linux-utils" + imageTag: "1.12.0" + +featureGates: + enabled: false + GPTBasedUUID: + enabled: false + featureGateFlag: "GPTBasedUUID" + +crd: + enableInstall: true + +policies: + monitoring: + enabled: true + image: "openebs/m-exporter" + imageTag: "1.12.0" + +analytics: + enabled: true + # Specify in hours the duration after which a ping event needs to be sent. + pingInterval: "24h" diff --git a/charts/portshift-operator/portshift-operator/0.1.000/.helmignore b/charts/portshift-operator/portshift-operator/0.1.000/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/portshift-operator/portshift-operator/0.1.000/Chart.yaml b/charts/portshift-operator/portshift-operator/0.1.000/Chart.yaml new file mode 100644 index 000000000..82b157203 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: portshift-operator +apiVersion: v1 +appVersion: v0.1.3 +description: | + Portshift cloud-native security platform is an agentless security solution for containerized applications +home: https://www.portshift.io/ +icon: https://www.portshift.io/wp-content/uploads/2019/10/portshift-logo-68.png +keywords: +- portshift +- operator +- monitoring +- security +- alerting +- metric +- troubleshooting +- run-time +maintainers: +- email: idan@portshift.io + name: idan +name: portshift-operator +version: 0.1.000 diff --git a/charts/portshift-operator/portshift-operator/0.1.000/README.md b/charts/portshift-operator/portshift-operator/0.1.000/README.md new file mode 100644 index 000000000..80073c37f --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/README.md @@ -0,0 +1,5 @@ +# Portshift Operator + +Please [contact us](https://www.portshift.io/contact-us/) to get installation instructions. + +This Helm Chart requires Helm 3. diff --git a/charts/portshift-operator/portshift-operator/0.1.000/app-readme.md b/charts/portshift-operator/portshift-operator/0.1.000/app-readme.md new file mode 100644 index 000000000..2dafd7ac4 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/app-readme.md @@ -0,0 +1,5 @@ +Portshift cloud-native security platform is an agentless security solution for containerized applications. + +Portshift provides a real-time Kubernetes container security solution that addresses images, containers and cluster resources security. Detecting images vulnerabilities, pods misconfiguration, overly permissive roles and clusters misconfiguration and threats. + +Portshift security platform leverages service mesh to enable unmatched network security enforcement tools that simplifies the implementation of zero trust security model in Kubernetes clusters, complemented by declarative security policies cluster entities visualisation. \ No newline at end of file diff --git a/charts/portshift-operator/portshift-operator/0.1.000/crds/crd.yaml b/charts/portshift-operator/portshift-operator/0.1.000/crds/crd.yaml new file mode 100644 index 000000000..adf76f92d --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/crds/crd.yaml @@ -0,0 +1,53 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: portshiftinstallers.portshift.io + labels: + app: portshift-operator +spec: + group: portshift.io + names: + kind: PortshiftInstaller + listKind: PortshiftInstallerList + plural: portshiftinstallers + singular: portshiftinstaller + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: PortshiftInstaller is the Schema for the portshiftinstallers API + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info in https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info in https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PortshiftInstallerSpec defines the desired state of PortshiftInstaller + properties: + portshiftClusterId: + description: Portshift cluster id as specified in Portshift console + type: string + managementUrl: + description: Portshift console URL + type: string + required: + - portshiftClusterId + - managementUrl + type: object + status: + description: Place holder for future use. + properties: + phase: + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/portshift-operator/portshift-operator/0.1.000/questions.yaml b/charts/portshift-operator/portshift-operator/0.1.000/questions.yaml new file mode 100644 index 000000000..c08055359 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/questions.yaml @@ -0,0 +1,60 @@ +questions: +#operator image configurations +- variable: defaultImage + default: true + description: "Use default Portshift operator image or specify a custom one" + label: Use Default Portshift Image + type: boolean + show_subquestion_if: false + group: "Operator Container Images" + subquestions: + - variable: operator.image.repository + default: "gcr.io/development-infra-208909/k8s_operator" + description: "Portshift Operator Image Name" + type: string + label: Portshift Operator Image Name + - variable: operator.image.tag + default: "v0.1.3" + description: "Portshift Operator Image Tag" + type: string + label: Portshift Operator Image Tag + - variable: operator.image.pullPolicy + default: "IfNotPresent" + description: "Portshift Operator Image Pull Policy" + type: string + label: Portshift Operator Image Pull Policy +#operator access configurations +- variable: operator.secret.accessKey + default: "" + description: "Operator access key retrieved from Portshift console" + type: string + required: true + label: Portshift Operator Access Key + group: "Operator Access Configuration" +- variable: operator.secret.secretKey + default: "" + description: "Operator secret key retrieved from Portshift console" + type: string + required: true + label: Portshift Operator Secret Key + group: "Operator Access Configuration" +- variable: defaultMgmtUrl + default: true + description: "Use default Portshift console url or specify a custom one" + label: Use Default Portshift Url + type: boolean + show_subquestion_if: false + group: "Operator Access Configuration" + subquestions: + - variable: operator.portshiftinstaller.managementUrl + default: "console.portshift.io" + description: "Portshift Console Url" + type: string + label: Portshift Console Url +- variable: operator.portshiftinstaller.portshiftClusterId + default: "" + description: "Cluster id definition retrieved from Portshift console" + type: string + required: true + label: Portshift Cluster ID + group: "Operator Access Configuration" diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/_helpers.tpl b/charts/portshift-operator/portshift-operator/0.1.000/templates/_helpers.tpl new file mode 100644 index 
000000000..96fb234df --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/_helpers.tpl @@ -0,0 +1,45 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "portshift-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "portshift-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "portshift-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "portshift-operator.labels" -}} +app.kubernetes.io/name: {{ include "portshift-operator.name" . }} +helm.sh/chart: {{ include "portshift-operator.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/clusterrolebinding.yaml b/charts/portshift-operator/portshift-operator/0.1.000/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..9421465ea --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portshift-operator + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portshift-operator + namespace: {{ .Release.Namespace }} diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/deployment.yaml b/charts/portshift-operator/portshift-operator/0.1.000/templates/deployment.yaml new file mode 100644 index 000000000..4fc8aacfe --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/deployment.yaml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portshift-operator + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "portshift-operator.name" . }} + template: + metadata: + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + serviceAccountName: portshift-operator + containers: + - name: portshift-operator + image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}" + command: + - portshift-operator + imagePullPolicy: "{{ .Values.operator.image.pullPolicy }}" + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: portshift-operator diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/portshiftinstaller.yaml b/charts/portshift-operator/portshift-operator/0.1.000/templates/portshiftinstaller.yaml new file mode 100644 index 000000000..1cec0ff15 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/portshiftinstaller.yaml @@ -0,0 +1,13 @@ +apiVersion: portshift.io/v1 +kind: PortshiftInstaller +metadata: + name: portshiftinstaller + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + managementUrl: {{ .Values.operator.portshiftinstaller.managementUrl }} + portshiftClusterId: {{ .Values.operator.portshiftinstaller.portshiftClusterId }} diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/secret.yaml b/charts/portshift-operator/portshift-operator/0.1.000/templates/secret.yaml new file mode 100644 index 000000000..24064d7f1 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/secret.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: portshift-operator-secret + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + accessKey: {{ required "A valid .Values.operator.secret.accessKey is required" .Values.operator.secret.accessKey | b64enc | quote }} + secretKey: {{ required "A valid .Values.operator.secret.secretKey is required" .Values.operator.secret.secretKey | b64enc | quote }} diff --git a/charts/portshift-operator/portshift-operator/0.1.000/templates/serviceaccount.yaml b/charts/portshift-operator/portshift-operator/0.1.000/templates/serviceaccount.yaml new file mode 100644 index 000000000..0604efeb6 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portshift-operator + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "portshift-operator.name" . }} + chart: {{ template "portshift-operator.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} diff --git a/charts/portshift-operator/portshift-operator/0.1.000/values.yaml b/charts/portshift-operator/portshift-operator/0.1.000/values.yaml new file mode 100644 index 000000000..438fa39e8 --- /dev/null +++ b/charts/portshift-operator/portshift-operator/0.1.000/values.yaml @@ -0,0 +1,11 @@ +operator: + image: + repository: gcr.io/development-infra-208909/k8s_operator + tag: v0.1.3 + pullPolicy: IfNotPresent + secret: + accessKey: "" + secretKey: "" + portshiftinstaller: + managementUrl: console.portshift.io + portshiftClusterId: "" diff --git a/charts/streamsets/control-agent/2.0.100/.helmignore b/charts/streamsets/control-agent/2.0.100/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/streamsets/control-agent/2.0.100/Chart.yaml b/charts/streamsets/control-agent/2.0.100/Chart.yaml new file mode 100644 index 000000000..918242aac --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/Chart.yaml @@ -0,0 +1,19 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: streamsets +apiVersion: v1 +appVersion: 3.8.0 +description: Control Agent for managing StreamSets Control Hub Deployments +home: https://streamsets.com +icon: https://github.com/streamsets/datacollector/raw/master/basic-lib/src/main/resources/sdcipc.png +keywords: +- streamsets +- sdc +- sch +maintainers: +- email: thomas.ganka@streamsets.com + name: thomasganka +name: control-agent +sources: +- https://github.com/streamsets/helm-charts/tree/master/incubating/control-agent +version: 2.0.100 diff --git a/charts/streamsets/control-agent/2.0.100/README.md b/charts/streamsets/control-agent/2.0.100/README.md new file mode 100644 index 000000000..fd7dd1e5a --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/README.md @@ -0,0 +1,52 @@ +# StreamSets Control Agent + +The [StreamSets](https://streamsets.com) control agent manages StreamSets Control Hub deployments. + +## Introduction + +This chart supports both RBAC and non-RBAC enabled clusters. It has no dependencies. + +## Installing the Chart + +First, add the streamsets stable repository to helm. + +```bash +helm repo add streamsets https://streamsets.github.io/helm-charts/stable +``` + +To install the chart with the release name `my-release` into the namespace `streamsets`: + +```bash +helm install streamsets/control-agent --name my-release --namespace streamsets +``` + +## Configuration + +The following tables lists the configurable parameters of the chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------- | -------------------------------------------------------------------- | ----------------------------------------- | +| `image.repository` | Control Agent image name | `streamsets/control-agent` | +| `image.tag` | The version of the official image to use | `3.0.0` | +| `image.pullPolicy` | Pull policy for the image | `IfNotPresent` | +| `streamsets.orgId` | This is the part of your SCH/DPM username after the `@` | None (Required) | +| `streamsets.api.url` | The URL for the SCH/DPM instance to connect to | `https://cloud.streamsets.com` | +| `streamsets.api.token` | Agent auth token from the SCH/DPM REST API or UI | None (Required) | +| `rbac.enabled` | Creates req'd ServiceAccount and Role on RBAC-enabled cluster | `true` | +| `resources` | Resource request for the pod | None | +| `nodeSelector` | Node Selector to apply to the deployment | None | + +`streamsets.api.token` and `streamsets.orgId` are required values + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example: + +```bash +helm install streamsets/control-agent --set streamsets.api.token="my_api_token" --set streamsets.orgId="my_org" +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while +installing the chart. For example: + +```bash +helm install streamsets/control-agent --values values.yaml +``` diff --git a/charts/streamsets/control-agent/2.0.100/app-readme.md b/charts/streamsets/control-agent/2.0.100/app-readme.md new file mode 100644 index 000000000..f511468db --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/app-readme.md @@ -0,0 +1,5 @@ +# Streamsets + +[Streamsets](https://www.streamsets.com/) is a data engineering platform. This chart adds the Streamsets Agent to all nodes in your cluster. The agent communicates with Control Hub to automatically provision Data Collector containers in the Kubernetes cluster in which it runs [Streamsets Control Hub](https://streamsets.com/documentation/controlhub/latest/help/controlhub/UserGuide/GettingStarted/DPM.html#concept_l45_qwf_xw. For more information about deploying Streamsets on Kubernetes, please refer to the [Streamsets documentation website](https://streamsets.com/documentation/controlhub/latest/help/controlhub/UserGuide/DataCollectorsProvisioned/Provisioned.html#concept_jsd_v24_lbb). + +Streamsets [Docker Image](https://hub.docker.com/r/streamsets/datacollector). 
\ No newline at end of file diff --git a/charts/streamsets/control-agent/2.0.100/krb/krb5.conf b/charts/streamsets/control-agent/2.0.100/krb/krb5.conf new file mode 100644 index 000000000..d97d77d1e --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/krb/krb5.conf @@ -0,0 +1,17 @@ +[libdefaults] +default_realm = +dns_lookup_kdc = false +dns_lookup_realm = false +ticket_lifetime = 86400 +renew_lifetime = 604800 +forwardable = true +default_tgs_enctypes = aes256-cts +default_tkt_enctypes = aes256-cts +permitted_enctypes = aes256-cts +udp_preference_limit = 1 +[realms] + = { +kdc = +admin_server = +} + diff --git a/charts/streamsets/control-agent/2.0.100/questions.yml b/charts/streamsets/control-agent/2.0.100/questions.yml new file mode 100644 index 000000000..b34ac722c --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/questions.yml @@ -0,0 +1,65 @@ +questions: +#image configurations +- variable: defaultImage + default: true + description: "Use default Streamsets image or specify a custom one" + label: Use Default Streamsets Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: image.repository + default: "streamsets/control-agent" + description: "Streamsets Control Agent Image Name" + type: string + label: Streamsets Control Agent Image Name + - variable: image.tag + default: "3.0.0" + description: "Streamsets Image Tag" + type: string + label: Streamsets Image Tag + - variable: image.pullPolicy + default: "IfNotPresent" + description: "Pull policy for the image" + type: string + label: Streamsets Pull Policy +#streamsets configurations +- variable: streamsets.orgId + default: "" + description: "This is the part of your Streamsets Control Hub username after the `@`" + type: string + label: Enable Org ID + required: true + group: "Streamsets Org ID" +- variable: streamsets.api.url + default: "https://cloud.streamsets.com" + description: "The URL for the Streamsets Control Hub instance to connect to. Default is Streamsets Hosted Service" + type: string + label: Enable Streamsets API URL + required: false + group: "Streamsets API URL" +- variable: streamsets.api.token + default: "" + description: "Agent auth token from the Streamsets Control Hub REST API or UI" + type: string + required: true + label: Agent Auth Token + group: "Agent Auth Token" +- variable: rbac.enabled + default: true + description: "Creates req'd ServiceAccount and Role on RBAC-enabled cluster" + type: boolean + label: Enable RBAC + group: "RBAC" +- variable: resources + default: "" + description: "Resource request for the Control Agent pod" + type: string + label: Resource request for Streamsets Control Agent + group: "Resource request" +- variable: nodeSelector + default: "" + description: "Node Selector to apply to the deployment " + type: string + label: Node Selector for Streamsets Control Agent + group: "Node Selector" diff --git a/charts/streamsets/control-agent/2.0.100/templates/NOTES.txt b/charts/streamsets/control-agent/2.0.100/templates/NOTES.txt new file mode 100644 index 000000000..f17b2e5d9 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/NOTES.txt @@ -0,0 +1,2 @@ +The agent has been successfully installed. +Visit {{ .Values.streamsets.api.url }}/sch/provisioning/deployments to continue setup. 
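The `krb/krb5.conf` shipped above is only a skeleton with blank realm and KDC entries; Kerberos support is switched on through the chart's `krb.*` values, and the `krb5-secret.yaml` template further down in this diff marks most of them as required. A minimal sketch of what enabling it could look like, assuming a hypothetical Active Directory setup (only the key names come from the chart; every concrete value below is a placeholder):

```yaml
# Hypothetical example -- replace every value with your own environment's details.
krb:
  enabled: true
  kdcType: AD                     # the chart's values.yaml hints at AD or MIT
  realm: "EXAMPLE.COM"
  encryptionTypes: "aes256-cts"
  containerDn: "OU=ServiceAccounts,DC=example,DC=com"
  ldapUrl: "ldaps://ad.example.com"
  adminPrincipal: "svc-keytab-admin@EXAMPLE.COM"
  adminKey: "change-me"
```

With `krb.enabled: true` the deployment template mounts the `krb5conf` Secret at `/opt/kerberos` and points `KRB5_CONFIG` at `/opt/kerberos/krb5.conf`.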
diff --git a/charts/streamsets/control-agent/2.0.100/templates/_helpers.tpl b/charts/streamsets/control-agent/2.0.100/templates/_helpers.tpl new file mode 100644 index 000000000..8288985e9 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "control-agent.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "control-agent.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "control-agent.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/streamsets/control-agent/2.0.100/templates/deployment.yaml b/charts/streamsets/control-agent/2.0.100/templates/deployment.yaml new file mode 100644 index 000000000..6dc75545b --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/deployment.yaml @@ -0,0 +1,87 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "control-agent.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- if .Values.rbac.enabled }} + serviceAccountName: {{ include "control-agent.fullname" . }} + {{- else }} + serviceAccountName: default + {{- end }} + terminationGracePeriodSeconds: 60 + containers: + - name: {{ include "control-agent.fullname" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.krb.enabled }} + volumeMounts: + - name: krb5conf + mountPath: "/opt/kerberos" + readOnly: true + {{- end}} + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- end }} + env: + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: dpm_agent_master_url + value: "https://kubernetes.default.svc.cluster.local" + - name: dpm_agent_cof_type + value: KUBERNETES + - name: dpm_agent_dpm_baseurl + value: {{ default "https://cloud.streamsets.com" .Values.streamsets.api.url }} + - name: dpm_agent_component_id + value: {{ .Release.Name }}-{{ .Values.streamsets.orgId }} + - name: dpm_agent_token_string + valueFrom: + secretKeyRef: + name: {{ include "control-agent.fullname" . 
}} + key: apiToken + - name: dpm_agent_name + value: {{ .Release.Name }} + - name: dpm_agent_orgId + value: {{required "An SCH orgId is required!" .Values.streamsets.orgId }} + - name: dpm_agent_secret + value: {{ include "control-agent.fullname" . }} + - name: dpm_agent_crd_enabled + value: "{{.Values.streamsets.crdEnabled}}" + {{- if .Values.krb.enabled }} + - name: dpm_agent_kerberos_enabled + value: "true" + - name: KRB5_CONFIG + value: "/opt/kerberos/krb5.conf" + - name: dpm_agent_kerberos_secret + value: kerbsecret + - name: dpm_agent_kdc_type + value: {{ .Values.krb.kdcType }} + {{- end}} + {{- if .Values.krb.enabled }} + volumes: + - name: krb5conf + secret: + secretName: krb5conf + {{- end}} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} diff --git a/charts/streamsets/control-agent/2.0.100/templates/krb5-secret.yaml b/charts/streamsets/control-agent/2.0.100/templates/krb5-secret.yaml new file mode 100644 index 000000000..6af832276 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/krb5-secret.yaml @@ -0,0 +1,32 @@ +{{- if .Values.krb.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: krb5conf + labels: + app: {{ template "control-agent.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: +{{ (.Files.Glob "krb/krb5.conf").AsSecrets | indent 2 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: kerbsecret + labels: + app: {{ template "control-agent.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + encryption_types: {{required "encryption types to use when creating a keytab for service principal!" .Values.krb.encryptionTypes | b64enc }} + container_dn: {{required "distinguished name of the container under which new principals will be created is required!" .Values.krb.containerDn | b64enc }} + ldap_url: {{required "URL of the LDAP service provider is required!" .Values.krb.ldapUrl | b64enc }} + admin_principal: {{required "user account which has privileges to create, search and destroy service principals is required!" .Values.krb.adminPrincipal | b64enc }} + admin_key : {{required "secret key for the admin principal is required!" .Values.krb.adminKey | b64enc }} + realm : {{required "the realm of the organization is required!" .Values.krb.realm | b64enc }} +{{- end}} diff --git a/charts/streamsets/control-agent/2.0.100/templates/rbac.yaml b/charts/streamsets/control-agent/2.0.100/templates/rbac.yaml new file mode 100644 index 000000000..29607415a --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/rbac.yaml @@ -0,0 +1,91 @@ +{{- if .Values.rbac.enabled }} +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ include "control-agent.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + namespace: {{ .Release.Namespace }} + name: {{ include "control-agent.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: ["", "extensions", "autoscaling", "apps"] + resources: ["pods", "deployments", "replicasets", "horizontalpodautoscalers", "services", "ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update"] +{{- if .Values.streamsets.crdEnabled }} +- apiGroups: ["streamsets.k8s.io"] + resources: ["sdcs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ include "control-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "control-agent.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "control-agent.fullname" . }} + namespace: {{ .Release.Namespace }} +--- +{{- if .Values.streamsets.crdEnabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: streamsets-crd-handler + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +rules: +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions", ] + verbs: ["get", "list", "watch"] +- apiGroups: ["apiextensions.k8s.io", "streamsets.k8s.io"] + resources: ["customresourcedefinitions", "sdcs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "control-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: streamsets-crd-handler +subjects: +- kind: ServiceAccount + name: {{ include "control-agent.fullname" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/charts/streamsets/control-agent/2.0.100/templates/sdc.yaml b/charts/streamsets/control-agent/2.0.100/templates/sdc.yaml new file mode 100644 index 000000000..bd061f56e --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/sdc.yaml @@ -0,0 +1,22 @@ +{{- if .Values.streamsets.crdEnabled }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: sdcs.streamsets.k8s.io + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + annotations: + "helm.sh/hook": crd-install + "helm.sh/hook-delete-policy": before-hook-creation +scope: Namespaced +spec: + group: streamsets.k8s.io + version: v1 + names: + kind: SdcCustomResource + plural: sdcs + singular: sdc +{{- end }} diff --git a/charts/streamsets/control-agent/2.0.100/templates/token-secret.yaml b/charts/streamsets/control-agent/2.0.100/templates/token-secret.yaml new file mode 100644 index 000000000..fa397f495 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/templates/token-secret.yaml @@ -0,0 +1,12 @@ +kind: Secret +apiVersion: v1 +metadata: + name: {{ include "control-agent.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "control-agent.name" . }} + helm.sh/chart: {{ include "control-agent.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +type: Opaque +data: + apiToken: {{ required "A Control Agent API token is required!" .Values.streamsets.api.token | b64enc }} diff --git a/charts/streamsets/control-agent/2.0.100/values.yaml b/charts/streamsets/control-agent/2.0.100/values.yaml new file mode 100644 index 000000000..3ccdef683 --- /dev/null +++ b/charts/streamsets/control-agent/2.0.100/values.yaml @@ -0,0 +1,28 @@ +# Default values for Streamsets Control Agent +image: + repository: streamsets/control-agent + tag: latest + pullPolicy: Always +streamsets: + orgId: + crdEnabled: false + api: + url: https://cloud.streamsets.com + token: +rbac: + enabled: true +krb: + enabled: false + encryptionTypes: + containerDn: + ldapUrl: + adminPrincipal: + adminKey: + realm: + kdcType: < AD | MIT > +## +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} +nodeSelector: {} diff --git a/charts/sysdig/sysdig/1.9.200/CHANGELOG.md b/charts/sysdig/sysdig/1.9.200/CHANGELOG.md new file mode 100644 index 000000000..3b017ffe4 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/CHANGELOG.md @@ -0,0 +1,437 @@ +# Chart: Sysdig + +## Change Log + +This file documents all notable changes to Sysdig Helm Chart. The release +numbering uses [semantic versioning](http://semver.org). + +## v1.9.2 + +### Minor changes + +* Use latest image from Agent (10.3.0) + +## v1.9.1 + +### Minor changes + +* Remove explicit *onPrem* option. Use *collectorSettings* section instead. + +## v1.9.0 + +### Major changes + +* Option to deploy the [Node Image Analyzer](https://docs.sysdig.com/en/scan-running-images.html). + +### Minor changes + +* Include get/list/watch endpoints in agent clusterrole permissions. + +## v1.8.1 + +### Minor changes + +* Use the latest image from Agent (10.2.0) by default. + +### Bug fixes + +* Fix logic in template that was disabling captures in the agent settings. + +## v1.8.0 + +### Major changes + +* Migrated charts to *sysdiglabs* repository + +### Minor changes + +* Add explicit *clusterName* option in values.yaml +* Add beta.kubernetes.io labels for node affinity, to support older versions +* SCC deployed by default in Openshift (check API security.openshift.io/v1) + +## v1.7.20 + +### Minor changes + +* Use the latest image from Agent (10.1.1) by default. + +## v1.7.19 + +### Minor changes + +* Use the latest image from Agent (10.1.0) by default. + +## v1.7.18 + +### Minor changes + +* Add explicit *disable captures* option to agent settings. 
+ +## v1.7.17 + +### Minor changes + +* Add onPrem as explicit option to set collector host, port and settings +* Fail if no sysdig.accessKey value is provided + +## v1.7.16 + +### Minor changes + +* Include support links in README.md + +## v1.7.15 + +### Minor changes + +* Use the latest image from Agent (10.0.0) by default. + +## v1.7.14 + +### Minor changes + +* Implement a more comprehensive securityContext for running the pod. + +## v1.7.13 + +### Minor changes + +* Implement scheduling with affinity and not with nodeSelector on amd64 & linux nodes. +* Add support for custom annotations on daemonSet. + +## v1.7.12 + +### Minor changes + +* Use the latest image from Agent (9.9.1) by default. +* Use kubernetes.io/arch label on daemonSet to schedule pods only on amd64 nodes. +* Add a livenessProbe to daemonSet. + +## v1.7.11 + +### Minor changes + +* Use app.kubernetes.io labels instead of custom ones + +## v1.7.10 + +### Minor changes + +* Use the latest image from Agent (9.9.0) by default. + +## v1.7.9 + +### Minor changes + +* Add the SecurityContextConstraints if the security.openshift.io/v1 API is detected. + +## v1.7.8 + +### Minor changes + +* Add an image.overrideValue value which is a hack to support + RELATED_IMAGE_ feature in Helm based operators. + +## v1.7.7 + +### Minor changes + +* Use the latest image from Agent (9.8.0) by default. + +## v1.7.6 + +### Minor changes + +* Use rbac.authorization.k8s.io/v1 instead of the beta1 API. +* Fix security key duplication when enabling secure and auditLog. + +## v1.7.5 + +### Minor changes + +* Use the latest image from Agent (9.7.0) by default. + +## v1.7.4 + +### Minor changes + +* Use the latest image from Agent (9.6.1) by default. + +## v1.7.3 + +### Minor changes + +* Removed dependency on ebpf.enabled to set environment variables + +## v1.7.2 + +### Minor changes + +* Use the latest image from Agent (9.5.0) by default. + +## v1.7.1 + +### Major changes + +* Remove the auditLog.clusterIP dependency. Using dynamic backend allows to + rely on DNS queries. + +## v1.7.0 + +### Major changes + +* Enable Sysdig Secure by default. + +## v1.6.0 + +### Major changes + +* Add audit log configuration when deploying the agent. + +## v1.5.0 + +### Major changes + +* Add slim configuration for deploying the agent. + +### Minor changes + +* Mount /etc/modprobe.d from host. +* Drop permissions to read secrets and configmaps. + +## v1.4.25 + +### Minor changes + +* Use the latest image from Agent (0.94.0) by default. + +## v1.4.24 + +### Minor changes + +* Use the latest image from Agent (0.93.1) by default. + +## v1.4.23 + +### Minor changes + +* Update NOTES.txt to use the newest URL for finding the infrastructure. + +## v1.4.22 + +### Minor changes + +* Use the latest image from Agent (0.93.0) by default. + +## v1.4.21 + +* Add 'How to upgrade to last version' to the README + +## v1.4.20 + +### Minor changes + +* Fixes compatibility errors introduced in v1.4.19. + +## v1.4.19 + +### Minor changes + +* Fixes compatibility with kubernetes 1.16. + +## v1.4.18 + +### Minor changes + +* Use the latest image from Agent (0.92.3) by default. + +## v1.4.17 + +### Minor changes + +* Use the latest image from Agent (0.92.2) by default. 
+ +## v1.4.16 + +### Minor changes + +* Allow the DaemonSet to schedule using affinity rules + +## v1.4.15 + +### Minor changes + +* Add configmaps and secrets to the resources we can read +* Add support for priorityClassName, httpProxy, timezone and any env variable settings + +## v1.4.14 + +### Minor changes + +* Update README.md to fix the example of how to use `sysdig.settings.tags` on the command line with `--set` + +## v1.4.13 + +### Minor changes + +* Use the latest image from Agent (0.92.1) by default. +* Increase `resources.requests` and `resources.limits` to match the [values + provided by Sysdig's agent team.](https://github.com/draios/sysdig-cloud-scripts/blob/master/agent_deploy/kubernetes/sysdig-agent-daemonset-v2.yaml#L70) + +## v1.4.12 + +### Minor changes + +* Use the latest image from Agent (0.92.0) by default. + +## v1.4.11 + +### Minor Changes + +* Add nestorsalceda as an approver in the OWNERS file + +## v1.4.10 + +### Minor Changes + +* Use the latest image from Agent (0.90.3) by default. + +## v1.4.9 + +### Minor Changes + +* Use the latest image from Agent (0.90.2) by default. + +## v1.4.8 + +### Minor Changes + +* Add a volume with the OS release information. +* Use the latest image from Agent (0.90.1) by default. + +## v1.4.7 + +### Minor Changes + +* Add apiVersion to Chart.yaml. + +## v1.4.6 + +### Minor Changes + +* Don't allow changing the value of the `new_k8s` flag. + +## v1.4.5 + +### Minor Changes + +* Enable `new_k8s` flag by default. This allows kube state metrics to be + automatically detected, monitored, and displayed in Sysdig Monitor. + +## v1.4.4 + +### Minor Changes + +* Use the latest image from Agent (0.89.5) by default. +* Add `persistentvolumes` and `persistentvolumeclaims` to ClusterRole + +## v1.4.3 + +### Minor Changes + +* Provide an empty value for the `sysdig.accessKey` key. + +## v1.4.2 + +### Minor Changes + +* Use the latest image from Agent (0.89.4) by default. +* Use the latest shovel logo. + +## v1.4.0 + +### Major Changes + +* Use the latest image from Agent (0.89.0) by default. +* eBPF support added. + +## v1.3.2 + +### Minor Changes + +* Provide sane default resources for the Sysdig Agent. +* Use RollingUpdate strategy by default. + +## v1.3.1 + +### Minor Changes + +* Revert v1.2.1 changes. The agent automatically restarts when it detects a change in the configuration. + +## v1.3.0 + +### Major Changes + +* Use a lower pod termination grace period for avoiding data gaps when the pod fails to terminate quickly. + +* Check the running file on readinessProbe instead of relying on logs. +* Mount /run and /var/run instead of the Docker socket. This allows access to the CRI / containerd socket. +* Avoid floating references for the image. + +## v1.2.2 + +### Minor Changes + +* Fix value in the agent tags example. + +## v1.2.1 + +### Minor Changes + +* Add checksum annotations to DaemonSet so that rolling upgrades work when a ConfigMap changes. + +## v1.2.0 + +### Major Changes + +* Allow using other Docker registries (ECR, Quay ...) to download the Sysdig agent image. + +## v1.1.0 + +### Major Changes + +* Add support for uploading custom app checks for the Sysdig agent + +## v1.0.4 + +### Minor Changes + +* Update README file with instructions for setting up the agent with On-Premise deployments + +## v1.0.3 + +### Minor Changes + +* Fixed error in ClusterRoleBinding's roleRef + +## v1.0.2 + +### Minor Changes + +* Fix readinessProbe in daemonset's pod spec + +## v1.0.1 + +### Minor Changes + +* Add dnsPolicy to daemonset.
Its value is ClusterFirstWithHostNet +Fix link target for retrieving Sysdig Monitor Access Key in README + +## v1.0.0 + +### Major Changes + +* Run Sysdig agent as [daemonset v2.0](https://github.com/draios/sysdig-cloud-scripts/blob/master/agent_deploy/kubernetes/sysdig-agent-daemonset-v2.yaml). +* Fix value's naming in order to follow [best practices](https://docs.helm.sh/chart_best_practices/#naming-conventions). +* Use a secure.enabled flag for enabling Sysdig Secure. +* Allow rbac resource creation or use existing serviceAccountName. +* Use the required function for retrieving sysdig.accessKey. This ensures that the key is present. diff --git a/charts/sysdig/sysdig/1.9.200/Chart.yaml b/charts/sysdig/sysdig/1.9.200/Chart.yaml new file mode 100644 index 000000000..8bff7b41f --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: sysdig +apiVersion: v1 +appVersion: 10.3.0 +description: Sysdig Monitor and Secure agent +home: https://www.sysdig.com/ +icon: https://478h5m1yrfsa3bbe262u7muv-wpengine.netdna-ssl.com/wp-content/uploads/2019/02/Shovel_600px.png +keywords: +- monitoring +- security +- alerting +- metric +- troubleshooting +- run-time +maintainers: +- email: lachlan@deis.com + name: lachie83 +- email: jorge.salamero@sysdig.com + name: bencer +- email: nestor.salceda@sysdig.com + name: nestorsalceda +- email: alvaro.iradier@sysdig.com + name: airadier +- email: carlos.arilla@sysdig.com + name: carillan81 +name: sysdig +sources: +- https://app.sysdigcloud.com/#/settings/user +- https://github.com/draios/sysdig +version: 1.9.200 diff --git a/charts/sysdig/sysdig/1.9.200/DESIGN.md b/charts/sysdig/sysdig/1.9.200/DESIGN.md new file mode 100644 index 000000000..607f73d67 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/DESIGN.md @@ -0,0 +1,34 @@ +# Chart: Sysdig + +## Design and Known Issues Justification Document + +### Goal + +The goal of this file is to document and give some context about some issues that +forced me to make some decisions in this Chart. + +### Loading Custom App Checks using a ConfigMap and a Yaml file + +In Helm, we are not able to add an external file to a Chart deployment; in fact, +there is an [issue](https://github.com/helm/helm/issues/3276) about this. + +This means that external files like SSL certificates or pluggable files, like +Falco rules or Custom App Checks, should be managed using Helm exclusively. You +can see the comments in the [first Falco pull request](https://github.com/helm/charts/pull/5853). + +The way to manage them using Helm is to pass the file contents as values to the Chart +deployment. A nice tip is to put them in a YAML file and pass it to the deployment command line +using the -f flag. + +### OpenShift support + +Right now, there is an issue in [OpenShift](https://github.com/openshift/origin/issues/20788) +and another in [Helm](https://github.com/helm/helm/issues/4533) that make +OpenShift support for this Chart a bit cumbersome. + +Eventually, they will be fixed. Meanwhile, a workaround is to create a +serviceAccount using the `oc` utility, manage the permissions for creating privileged +containers and allowing hostPath mounts with `oc`, and deploy the Chart with the +`serviceAccount.name` created with `oc`. + +You can see more details about this workaround in the [Sysdig Documentation about OpenShift](https://sysdigdocs.atlassian.net/wiki/spaces/Platform/pages/256671843/).
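As a rough sketch of the `oc` workaround described above (the commands are standard `oc`/`helm` invocations, but the project, service account, and release names are hypothetical, and the SCC grants your cluster needs may differ):

```bash
# Hypothetical names throughout -- adjust to your environment.
oc new-project sysdig-agent
oc create serviceaccount sysdig-agent
# Let the agent pods run privileged containers and mount hostPath volumes.
oc adm policy add-scc-to-user privileged -z sysdig-agent
# Install the chart, reusing the service account created with oc.
helm install --name sysdig-agent \
  --set sysdig.accessKey=YOUR-KEY-HERE \
  --set serviceAccount.create=false \
  --set serviceAccount.name=sysdig-agent \
  sysdiglabs/sysdig
```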
diff --git a/charts/sysdig/sysdig/1.9.200/OWNERS b/charts/sysdig/sysdig/1.9.200/OWNERS new file mode 100644 index 000000000..a424cc075 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/OWNERS @@ -0,0 +1,6 @@ +approvers: +- bencer +- nestorsalceda +reviewers: +- bencer +- nestorsalceda diff --git a/charts/sysdig/sysdig/1.9.200/README-AWS.md b/charts/sysdig/sysdig/1.9.200/README-AWS.md new file mode 100644 index 000000000..a9eea4564 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/README-AWS.md @@ -0,0 +1,45 @@ +# Chart: Sysdig + +## Deploying the AWS Marketplace Sysdig agent image + +This is a use case similar to pulling images from a private registry. First, you +need to get the authorization token for the AWS Marketplace ECR image registry: + +```bash +aws ecr --region=us-east-1 get-authorization-token --output text --query authorizationData[].authorizationToken | base64 -d | cut -d: -f2 +``` + +Then use it to create the Secret. Don't forget to replace TOKEN and EMAIL +with your own values: + +```bash +kubectl create secret docker-registry aws-marketplace-credentials \ + --docker-server=217273820646.dkr.ecr.us-east-1.amazonaws.com \ + --docker-username=AWS \ + --docker-password="TOKEN" \ + --docker-email="EMAIL" +``` + +Next, you need to create a values YAML file to pass the specific ECR registry +configuration (you will find these values when you activate the software from +the AWS Marketplace): + +```yaml +sysdig: + accessKey: XxxXXxXXxXXxxx + +image: + registry: 217273820646.dkr.ecr.us-east-1.amazonaws.com + repository: 2df5da52-6fa2-46f6-b164-5b879e86fd85/cg-3361214151/agent + tag: 0.85.1-latest + pullSecrets: + - name: aws-marketplace-credentials +``` + +Finally, set the accessKey value and you are ready to deploy the Sysdig agent +using the Helm chart: + +```bash +helm install --name sysdig-agent -f aws-marketplace-values.yaml stable/sysdig +``` + diff --git a/charts/sysdig/sysdig/1.9.200/README.md b/charts/sysdig/sysdig/1.9.200/README.md new file mode 100644 index 000000000..cac7539cb --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/README.md @@ -0,0 +1,377 @@ +# Chart: Sysdig + +[Sysdig](https://sysdig.com/) is a unified platform for container and microservices monitoring, troubleshooting, security and forensics. The Sysdig platform has been built on top of the [Sysdig tool](https://sysdig.com/opensource/sysdig/) and [Sysdig Inspect](https://sysdig.com/blog/sysdig-inspect/) open-source technologies. + +## Introduction + +This chart adds the Sysdig agent for [Sysdig Monitor](https://sysdig.com/product/monitor/) and [Sysdig Secure](https://sysdig.com/product/secure/) to all nodes in your cluster via a DaemonSet. + +## Prerequisites + +- Kubernetes 1.9+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`, retrieve your Sysdig Monitor Access Key from your [Account Settings](https://app.sysdigcloud.com/#/settings/agentInstallation) and run: + +```bash +$ helm repo add sysdiglabs https://sysdiglabs.github.io/charts/ +``` + +to add the `sysdiglabs` Helm chart repository. Then run: + +```bash +$ helm install --name my-release --set sysdig.accessKey=YOUR-KEY-HERE sysdiglabs/sysdig +``` + +After a few seconds, you should see hosts and containers appearing in Sysdig Monitor and Sysdig Secure.
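A quick way to sanity-check the rollout from the command line before opening the UI might look like this; it assumes the `app.kubernetes.io/instance` label that recent chart versions apply and a release named `my-release`:

```bash
# One agent pod per schedulable node should reach the Running state.
kubectl get daemonset,pods -l app.kubernetes.io/instance=my-release
# Peek at the agent logs to confirm the pods are talking to the collector.
kubectl logs -l app.kubernetes.io/instance=my-release --tail=20
```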
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` +> **Tip**: Use helm delete --purge my-release to completely remove the release from Helm internal storage + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the Sysdig chart and their default values. + +| Parameter | Description | Default | +| --- | --- | --- | +| `image.registry` | Sysdig Agent image registry | `docker.io` | +| `image.repository` | The image repository to pull from | `sysdig/agent` | +| `image.tag` | The image tag to pull | `10.3.0` | +| `image.pullPolicy` | The Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets | `nil` | +| `resources.requests.cpu` | CPU requested for being run in a node | `600m` | +| `resources.requests.memory` | Memory requested for being run in a node | `512Mi` | +| `resources.limits.cpu` | CPU limit | `2000m` | +| `resources.limits.memory` | Memory limit | `1536Mi` | +| `rbac.create` | If true, create & use RBAC resources | `true` | +| `scc.create` | Create OpenShift's Security Context Constraint | `true` | +| `serviceAccount.create` | Create serviceAccount | `true` | +| `serviceAccount.name` | Use this value as serviceAccountName | ` ` | +| `daemonset.updateStrategy.type` | The updateStrategy for updating the daemonset | `RollingUpdate` | +| `daemonset.affinity` | Node affinities | `schedule on amd64 and linux` | +| `daemonset.annotations` | Custom annotations for daemonset | `{}` | +| `slim.enabled` | Use the slim based Sysdig Agent image | `false` | +| `slim.kmoduleImage.repository` | The kernel module image builder repository to pull from | `sysdig/agent-kmodule` | +| `slim.resources.requests.cpu` | CPU requested for building the kernel module | `1000m` | +| `slim.resources.requests.memory` | Memory requested for building the kernel module | `348Mi` | +| `slim.resources.limits.memory` | Memory limit for building the kernel module | `512Mi` | +| `ebpf.enabled` | Enable eBPF support for Sysdig instead of `sysdig-probe` kernel module | `false` | +| `ebpf.settings.mountEtcVolume` | Needed to detect which kernel version are running in Google COS | `true` | +| `clusterName` | Set a cluster name to identify events using *kubernetes.cluster.name* tag | ` ` | +| `sysdig.accessKey` | Your Sysdig Monitor Access Key | `Nil` You must provide your own key | +| `sysdig.disableCaptures` | Disable capture functionality (see https://docs.sysdig.com/en/disable-captures.html) | `false` | +| `sysdig.settings` | Additional settings, directly included in the agent's configuration file `dragent.yaml` | `{}` | +| `secure.enabled` | Enable Sysdig Secure | `true` | +| `auditLog.enabled` | Enable K8s audit log support for Sysdig Secure | `false` | +| `auditLog.auditServerUrl` | The URL where Sysdig Agent listens for K8s audit log events | `0.0.0.0` | +| `auditLog.auditServerPort` | Port where Sysdig Agent listens for K8s audit log events | `7765` | +| `auditLog.dynamicBackend.enabled` | Deploy the Audit Sink where Sysdig listens for K8s audit log events | `false` | +| `customAppChecks` | The custom app checks deployed with your agent | `{}` | +| `nodeImageAnalyzer.deploy` | Deploy the Node Image Analyzer (See https://docs.sysdig.com/en/scan-running-images.html)| `false` | +| `nodeImageAnalyzer.image.repository` | The image repository to pull 
the Node Image Analyzer from | `sysdig/node-image-analyzer` | +| `nodeImageAnalyzer.image.tag` | The image tag to pull the Node Image Analyzer | `0.1.1` | +| `nodeImageAnalyzer.image.pullPolicy` | The Image pull policy for the Node Image Analyzer | `IfNotPresent` | +| `nodeImageAnalyzer.image.pullSecrets` | Image pull secrets for the Node Image Analyzer | `nil` | +| `nodeImageAnalyzer.resources.requests.cpu` | Node Image Analyzer CPU requests per node | `250m` | +| `nodeImageAnalyzer.resources.requests.memory` | Node Image Analyzer Memory requests per node | `512Mi` | +| `nodeImageAnalyzer.resources.limits.cpu` | Node Image Analyzer CPU limit per node | `500m` | +| `nodeImageAnalyzer.resources.limits.memory` | Node Image Analyzer Memory limit per node | `1024Mi` | +| `nodeImageAnalyzer.settings` | Additional Node Image Analyzer settings | `{}` | +| `tolerations` | The tolerations for scheduling | `node-role.kubernetes.io/master:NoSchedule` | +| `prometheus.file` | Use file to configure promscrape | `false` | +| `prometheus.yaml` | prometheus.yaml content to configure metric collection: relabelling and filtering | ` ` | +| `extraVolume.volumes` | Additional volumes to mount in the sysdig agent to pass new secrets or configmaps | `[]` | +| `extraVolume.mounts` | Mount points for additional volumes | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release \ + --set sysdig.accessKey=YOUR-KEY-HERE,sysdig.settings.tags="role:webserver\,location:europe" \ + sysdiglabs/sysdig +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml sysdiglabs/sysdig +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## On-Premise backend deployment settings + +Sysdig platform backend can be also deployed On-Premise in your own infrastructure. + +Installing the agent using the Helm chart is also possible in this scenario, and you can enable it with the following parameters: + +| Parameter | Description | Default | +| --- | --- | --- | +| `collectorSettings.collectorHost` | The IP address or hostname of the collector | ` ` | +| `collectorSettings.collectorPort` | The port where collector is listening | 6443 | +| `collectorSettings.ssl` | The collector accepts SSL | `true` | +| `collectorSettings.sslVerifyCertificate` | Set to false if you don't want to verify SSL certificate | `true` | + +For example: + +```bash +$ helm install --name my-release \ + --set sysdig.accessKey=YOUR-KEY-HERE \ + --set collectorSettings.collectorHost=42.32.196.18 \ + --set collectorSettings.collectorPort=6443 \ + --set collectorSettings.sslVerifyCertificate=false \ + sysdiglabs/sysdig +``` + +## Using private Docker image registry + +If you pull the Sysdig agent Docker image from a private registry that requires authentication, some additional configuration is required. 
+ +First, create a secret that stores the registry credentials: + +```bash +$ kubectl create secret docker-registry SECRET_NAME \ + --docker-server=SERVER \ + --docker-username=USERNAME \ + --docker-password=TOKEN \ + --docker-email=EMAIL +``` + +Then, point to this secret in the values YAML file: + +```yaml +sysdig: + accessKey: YOUR-KEY-HERE +image: + registry: myrepo.mydomain.tld + repository: sysdig-agent + tag: latest-tag + pullSecrets: + - name: SECRET_NAME +``` + +Finally, set the accessKey value and you are ready to deploy the Sysdig agent +using the Helm chart: + +```bash +$ helm install --name my-release -f values.yaml sysdiglabs/sysdig +``` + +You can read more details about this in the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). + +## Modifying Sysdig agent configuration + +The Sysdig agent uses a file called `dragent.yaml` to store the configuration. + +Using the Helm chart, the default configuration settings can be updated using `sysdig.settings`, either via `--set sysdig.settings.key = value` or in the values YAML file. For example, to enable Prometheus metrics scraping, you need this in your `values.yaml` file: + +```yaml +sysdig: + accessKey: YOUR-KEY-HERE + settings: + prometheus: + enabled: true + histograms: true +``` + +```bash +$ helm install --name my-release -f values.yaml sysdiglabs/sysdig +``` + +## Upgrading Sysdig agent configuration + +If you need to upgrade the agent configuration file, first modify the YAML file (in this case we are increasing the metrics limit when scraping Prometheus metrics): + +```yaml +sysdig: + accessKey: YOUR-KEY-HERE + settings: + prometheus: + enabled: true + histograms: true + max_metrics: 2000 + max_metrics_per_process: 400 +``` + +And then upgrade the Helm chart with: + +```bash +$ helm upgrade my-release -f values.yaml sysdiglabs/sysdig +``` + +## How to upgrade to the latest version + +First of all, ensure you have the latest chart version: + +```bash +$ helm repo update +``` + +If you deployed the chart with a values.yaml file, you just need to modify (or add, if it's missing) the `image.tag` field and execute: + +```bash +$ helm upgrade sysdig -f values.yaml sysdiglabs/sysdig +``` + +If you deployed the chart setting the values as CLI parameters, for example: + +```bash +$ helm install \ + --name sysdig \ + --set sysdig.accessKey=xxxx \ + --set ebpf.enabled=true \ + --namespace sysdig-agent \ + sysdiglabs/sysdig +``` + +You will need to execute: + +```bash +$ helm upgrade --set image.tag= --reuse-values sysdig sysdiglabs/sysdig +``` + +## Adding custom AppChecks + +[Application checks](https://sysdigdocs.atlassian.net/wiki/spaces/Monitor/pages/204767363/) are integrations that allow the Sysdig agent to collect metrics exposed by specific services. Sysdig has several built-in AppChecks, but sometimes you might need to [create your own](https://sysdigdocs.atlassian.net/wiki/spaces/Monitor/pages/204767436/).
+ +Your own AppChecks can be deployed with the Helm chart by embedding them in the values YAML file: + +```yaml +customAppChecks: + sample.py: |- + from checks import AgentCheck + + class MyCustomCheck(AgentCheck): + def check(self, instance): + self.gauge("testhelm", 1) + +sysdig: + accessKey: YOUR-KEY-HERE + settings: + app_checks: + - name: sample + interval: 10 + pattern: # pattern to match the application + comm: myprocess + conf: + mykey: myvalue +``` + +The first section dumps the AppCheck into a Kubernetes ConfigMap and makes it available within the Sysdig agent container. The second configures it in the `dragent.yaml` file. + +Once the values YAML file is ready, deploy the chart as before: + +```bash +$ helm install --name my-release -f values.yaml sysdiglabs/sysdig +``` + +### Automating the generation of the custom-app-checks.yaml file + +Sometimes editing and maintaining YAML files can be a bit cumbersome and error-prone, so we have created a script that automates this process and makes your life easier. + +Imagine that you have custom AppChecks for a number of services like Redis, MongoDB and Traefik. + +You already have a `values.yaml` with just your configuration: + +```yaml +sysdig: + accessKey: YOUR-KEY-HERE + settings: + app_checks: + - name: myredis + [...] + - name: mymongo + [...] + - name: mytraefik + [...] +``` + +You can generate an additional values YAML file with the custom AppChecks: + +```bash +$ git clone https://github.com/kubernetes/charts.git +$ cd charts/sysdiglabs/sysdig +$ ./scripts/appchecks2helm appChecks/solr.py appChecks/traefik.py appChecks/nats.py > custom-app-checks.yaml +``` + +And deploy the chart with both of them: + +```bash +$ helm install --name my-release -f custom-app-checks.yaml -f values.yaml sysdiglabs/sysdig +``` + +### Adding prometheus.yaml to configure promscrape + +Promscrape is the component of the Sysdig agent used to collect Prometheus metrics. It is based on Prometheus and accepts the same configuration format. + +This file can contain relabelling rules and filters to remove certain metrics, or additional configuration for the collection. An example of this file could be: + +```yaml +global: + scrape_interval: 15s + evaluation_interval: 15s +scrape_configs: +- job_name: 'prometheus' # config for federation + honor_labels: true + metrics_path: '/federate' + metric_relabel_configs: + - regex: 'kubernetes_pod_name' + action: labeldrop + params: + 'match[]': + - '{sysdig="true"}' + sysdig_sd_configs: + - tags: + namespace: monitoring + deployment: prometheus-server +``` +`sysdig_sd_configs` allows you to select the targets discovered by the Sysdig agent to which the rules in the job apply. Check [how to configure filtering in the Sysdig documentation](https://docs.sysdig.com/en/filtering-prometheus-metrics.html). + + +### Adding additional volumes + +To pass new ConfigMaps or Secrets used for authentication (for example, for Prometheus endpoints) to the Sysdig agent, you can mount additional volumes. An example of this could be: + +```yaml +extraVolumes: + volumes: + - name: sysdig-new-cm + configMap: + name: my-cm + optional: true + - name: sysdig-new-secret + secret: + secretName: my-secret + mounts: + - mountPath: /opt/draios/cm + name: sysdig-new-cm + - mountPath: /opt/draios/secret + name: sysdig-new-secret +``` + +## Support + +To get support from the Sysdig team, refer to the official +[Sysdig Support page](https://sysdig.com/support).
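+
+When you open a case, it usually helps to attach the agent logs and the configuration that the chart generated. The commands below are only a sketch: the label selector and the ConfigMap name are assumptions based on this chart's templates and on a release named `my-release`.
+
+```bash
+# Logs from the agent pods (label set by this chart's DaemonSet; adjust if you overrode the chart name)
+$ kubectl logs -l app.kubernetes.io/name=sysdig --tail=200
+
+# The rendered dragent.yaml configuration (the ConfigMap name follows the <release>-sysdig pattern)
+$ kubectl get configmap my-release-sysdig -o yaml
+```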
+ +In addition to this, you can browse the documentation for the different +components of the Sysdig Platform: + +* [Sysdig Monitor](https://app.sysdigcloud.com) +* [Sysdig Secure](https://secure.sysdig.com) +* [Platform Documentation](https://docs.sysdig.com/en/sysdig-platform.html) +* [Monitor Documentation](https://docs.sysdig.com/en/sysdig-monitor.html) +* [Secure Documentation](https://docs.sysdig.com/en/sysdig-secure.html) diff --git a/charts/sysdig/sysdig/1.9.200/app-readme.md b/charts/sysdig/sysdig/1.9.200/app-readme.md new file mode 100644 index 000000000..80c6133f0 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/app-readme.md @@ -0,0 +1,20 @@ +# Sysdig Secure DevOps Platform + +Sysdig enables companies to confidently run cloud-native workloads in production. With the Sysdig Secure DevOps Platform, cloud teams embed security, maximize availability, and validate compliance. The Sysdig platform is open by design, with the scale, performance, and usability enterprises demand. The largest companies rely on Sysdig for cloud-native security and visibility. + +## Embed security +* Detect vulnerabilities and misconfigurations with a single workflow +* Block threats without impacting performance using K8s controls +* Conduct forensics even after the container is gone + +## Maximize availability +* Prevent issues by monitoring performance and capacity +* Accelerate troubleshooting with a single source of truth +* Scale Prometheus monitoring across clusters and clouds + +## Validate compliance +* Verify configuration meets CIS best practices +* Ensure application compliance with NIST, PCI +* Enable audit by correlating Kubernetes activity + +Learn more at [sysdig.com](https://sysdig.com/) diff --git a/charts/sysdig/sysdig/1.9.200/ci/test-values.yaml b/charts/sysdig/sysdig/1.9.200/ci/test-values.yaml new file mode 100644 index 000000000..a6745f8b2 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/ci/test-values.yaml @@ -0,0 +1,2 @@ +sysdig: + accessKey: xxx diff --git a/charts/sysdig/sysdig/1.9.200/questions.yml b/charts/sysdig/sysdig/1.9.200/questions.yml new file mode 100644 index 000000000..26e0d4358 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/questions.yml @@ -0,0 +1,100 @@ +questions: +#image configurations +- variable: defaultImage + default: true + description: "Use default Sysdig image or specify a custom one" + label: Use Default Sysdig Image + type: boolean + show_subquestion_if: false + group: "Container Images" + subquestions: + - variable: image.repository + default: "sysdig/agent" + description: "Sysdig Image Name" + type: string + label: Sysdig Image Name + - variable: image.tag + default: "10.3.0" + description: "Sysdig Image Tag" + type: string + label: Sysdig Image Tag +#agent configurations +- variable: sysdig.accessKey + default: "" + description: "You need your Sysdig accessKey before running agents" + type: string + required: true + label: Sysdig accessKey +- variable: sysdig.backend + default: "Sysdig SaaS" + description: "Where is Sysdig backend hosted on" + type: enum + label: Sysdig Backend + group: "Agent Configuration" + required: true + options: + - "sysdig-saas" + - "self-hosted" +- variable: sysdig.settings.collector + required: true + default: "collector.sysdigcloud.com" + description: "The host of the Sysdig collector the agent sends data to, only set this option if you need the agent to send data to a custom backend" + type: string + label: Sysdig Collector + group: "Agent Configuration" + show_if: "sysdig.backend=self-hosted" +- variable: 
sysdig.settings.collector_port + required: true + default: "6443" + description: "The port where the Sysdig collector listens to" + type: string + label: Sysdig Collector Port + group: "Agent Configuration" + show_if: "sysdig.backend=self-hosted" +- variable: sysdig.settings.ssl + required: true + default: true + description: "Use SSL to connect to the Sysdig collector" + type: boolean + label: Sysdig Collector SSL + group: "Agent Configuration" + show_if: "sysdig.backend=self-hosted" +- variable: sysdig.settings.ssl_verify_certificate + required: true + default: true + description: "Validate SSL certificate from the Sysdig collector" + type: boolean + label: Sysdig Collector Verify SSL Certificate + group: "Agent Configuration" + show_if: "sysdig.backend=self-hosted&&sysdig.settings.ssl=true" +- variable: sysdig.settings.tags + default: "" + description: "Agent tags, separated by commas. For example: 'linux:ubuntu,dept:dev,local:nyc'" + type: string + label: Agent Tags + group: "Agent Configuration" +- variable: ebpf.enabled + default: false + description: "Enable eBPF support for Sysdig agent instead of kernel module" + type: boolean + label: Enable eBPF + group: "Agent Configuration" +#proxy configurations +- variable: proxy.httpProxy + default: "" + description: "An http URL to use as a proxy for http requests" + type: string + label: Proxy for HTTP Requests + group: "Proxy Configuration" +- variable: proxy.httpsProxy + default: "" + description: "An http URL to use as a proxy for https requests" + type: string + label: Proxy for HTTPS Requests + group: "Proxy Configuration" +- variable: proxy.noProxy + default: "" + description: "A space-separated list of URLs for which no proxy should be used" + type: string + label: No Proxy List (separated by a space) + group: "Proxy Configuration" diff --git a/charts/sysdig/sysdig/1.9.200/scripts/appchecks2helm b/charts/sysdig/sysdig/1.9.200/scripts/appchecks2helm new file mode 100644 index 000000000..f9e0f2d21 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/scripts/appchecks2helm @@ -0,0 +1,11 @@ +#!/bin/bash + +echo "customAppChecks:" +for app_check in "$@" +do + echo -e " $(basename $app_check): |-" + while IFS= read -r line + do + echo -e " $line" + done <"$app_check" +done diff --git a/charts/sysdig/sysdig/1.9.200/templates/NOTES.txt b/charts/sysdig/sysdig/1.9.200/templates/NOTES.txt new file mode 100644 index 000000000..131548b8a --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/NOTES.txt @@ -0,0 +1,9 @@ +The agent for Sysdig Secure DevOps Platform is spinning up on each node in your +cluster. After a few seconds, you should see your hosts appearing in the +Explore tab: + + https://app.sysdigcloud.com/#/explore/overview/l:10 + + https://secure.sysdig.com/#/events/l:600/*/*?viewAs=list + +No further action should be required. diff --git a/charts/sysdig/sysdig/1.9.200/templates/_helpers.tpl b/charts/sysdig/sysdig/1.9.200/templates/_helpers.tpl new file mode 100644 index 000000000..5649026a7 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/_helpers.tpl @@ -0,0 +1,109 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "sysdig.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "sysdig.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "sysdig.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "sysdig.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "sysdig.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the proper imageRegistry to use for agent and kmodule image +*/}} +{{- define "sysdig.imageRegistry" -}} +{{- if and .Values.global (hasKey (default .Values.global dict) "imageRegistry") -}} + {{- .Values.global.imageRegistry -}} +{{- else -}} + {{- .Values.image.registry -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Sysdig Agent image name +*/}} +{{- define "sysdig.repositoryName" -}} +{{- .Values.image.repository -}} {{- if .Values.slim.enabled -}} -slim {{- end -}} +{{- end -}} + +{{- define "sysdig.image" -}} +{{- if .Values.image.overrideValue }} + {{- printf .Values.image.overrideValue -}} +{{- else -}} + {{- include "sysdig.imageRegistry" . -}} / {{- include "sysdig.repositoryName" . -}} : {{- .Values.image.tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Sysdig Agent image name for module building +*/}} +{{- define "sysdig.image.kmodule" -}} + {{- include "sysdig.imageRegistry" . -}} / {{- .Values.slim.kmoduleImage.repository -}} : {{- .Values.image.tag -}} +{{- end -}} + +{{/* +Return the proper Sysdig Agent image name for the Node Image Analyzer +*/}} +{{- define "sysdig.image.nodeImageAnalyzer" -}} + {{- include "sysdig.imageRegistry" . -}} / {{- .Values.nodeImageAnalyzer.image.repository -}} : {{- .Values.nodeImageAnalyzer.image.tag -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "sysdig.labels" -}} +helm.sh/chart: {{ include "sysdig.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + + +{{/* +Use like: {{ include "get_or_fail_if_in_settings" (dict "root" . "key" "" "setting" "") }} +Return the value of key "" and if "" is also defined in sysdig.settings., and error is thrown +NOTE: I don't like the error message! Too much information. +*/}} +{{- define "get_or_fail_if_in_settings" -}} +{{- $keyValue := tpl (printf "{{- .Values.%s -}}" .key) .root }} +{{- if $keyValue -}} + {{- if hasKey .root.Values.sysdig.settings .setting }}{{ fail (printf "Value '%s' is also set via .sysdig.settings.%s'." 
.key .setting) }}{{- end -}} + {{- $keyValue -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/sysdig/sysdig/1.9.200/templates/auditsink.yaml b/charts/sysdig/sysdig/1.9.200/templates/auditsink.yaml new file mode 100644 index 000000000..4f8378ea0 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/auditsink.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.auditLog.enabled .Values.auditLog.dynamicBackend.enabled }} +apiVersion: auditregistration.k8s.io/v1alpha1 +kind: AuditSink +metadata: + name: {{ template "sysdig.fullname" . }} + labels: +{{ include "sysdig.labels" . | indent 4 }} +spec: + policy: + level: RequestResponse + stages: + - ResponseComplete + - ResponseStarted + webhook: + throttle: + qps: 10 + burst: 15 + clientConfig: + service: + namespace: {{ .Release.Namespace }} + name: {{ template "sysdig.fullname" . }} + port: {{ .Values.auditLog.auditServerPort }} + path: /k8s_audit +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/clusterrole.yaml b/charts/sysdig/sysdig/1.9.200/templates/clusterrole.yaml new file mode 100644 index 000000000..35fde6101 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/clusterrole.yaml @@ -0,0 +1,66 @@ +{{- if .Values.rbac.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "sysdig.fullname" .}} + labels: +{{ include "sysdig.labels" . | indent 4 }} +rules: + - apiGroups: + - "" + resources: + - pods + - replicationcontrollers + - services + - endpoints + - events + - limitranges + - namespaces + - nodes + - resourcequotas + - persistentvolumes + - persistentvolumeclaims + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - daemonsets + - deployments + - ingresses + - replicasets + verbs: + - get + - list + - watch +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/clusterrolebinding.yaml b/charts/sysdig/sysdig/1.9.200/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..f8ab01298 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "sysdig.fullname" .}} + labels: +{{ include "sysdig.labels" . | indent 4 }} +subjects: + - kind: ServiceAccount + name: {{ template "sysdig.serviceAccountName" .}} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ template "sysdig.fullname" .}} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/configmap-custom-app-checks.yaml b/charts/sysdig/sysdig/1.9.200/templates/configmap-custom-app-checks.yaml new file mode 100644 index 000000000..6f0495efa --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/configmap-custom-app-checks.yaml @@ -0,0 +1,13 @@ +{{- if .Values.customAppChecks }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sysdig.fullname" . }}-custom-app-checks + labels: +{{ include "sysdig.labels" . 
| indent 4 }} +data: +{{- range $file, $content := .Values.customAppChecks }} + {{ $file }}: |- +{{ $content | indent 4}} +{{- end }} +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/configmap-image-analyzer.yaml b/charts/sysdig/sysdig/1.9.200/templates/configmap-image-analyzer.yaml new file mode 100644 index 000000000..d62acb6d5 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/configmap-image-analyzer.yaml @@ -0,0 +1,34 @@ +{{- if .Values.nodeImageAnalyzer.deploy }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sysdig.fullname" . }}-image-analyzer + labels: +{{ include "sysdig.labels" . | indent 4 }} +data: + debug: "{{ .Values.nodeImageAnalyzer.settings.debug | default false }}" + {{- if .Values.nodeImageAnalyzer.settings.imagePeriod }} + image_period: {{ .Values.nodeImageAnalyzer.settings.imagePeriod }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.imageCacheTTL }} + image_cache_ttl: {{ .Values.nodeImageAnalyzer.settings.imageCacheTTL }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.reportPeriod }} + report_period: {{ .Values.nodeImageAnalyzer.settings.reportPeriod }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.dockerSocketPath }} + docker_socket_path: {{ .Values.nodeImageAnalyzer.settings.dockerSocketPath }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.criSocketPath }} + cri_socket_path: {{ .Values.nodeImageAnalyzer.settings.criSocketPath }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.collectorEndpoint }} + collector_endpoint: {{ .Values.nodeImageAnalyzer.settings.collectorEndpoint }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.checkCertificate }} + check_certificate: {{ .Values.nodeImageAnalyzer.settings.checkCertificate }} + {{- end }} + {{- if .Values.nodeImageAnalyzer.settings.collectorTimeout }} + collector_timeout: {{ .Values.nodeImageAnalyzer.settings.collectorTimeout }} + {{- end }} +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/configmap.yaml b/charts/sysdig/sysdig/1.9.200/templates/configmap.yaml new file mode 100644 index 000000000..543f62af7 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/configmap.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sysdig.fullname" . }} + labels: +{{ include "sysdig.labels" . | indent 4 }} +data: + dragent.yaml: | + new_k8s: true +{{- $clusterName := include "get_or_fail_if_in_settings" (dict "root" . "key" "clusterName" "setting" "k8s_cluster_name")}} +{{- if $clusterName }} + k8s_cluster_name: {{ $clusterName }} +{{- end }} +{{- if or .Values.secure.enabled .Values.auditLog.enabled }} + security: + {{- if .Values.auditLog.enabled }} + k8s_audit_server_url: {{ .Values.auditLog.auditServerUrl }} + k8s_audit_server_port: {{ .Values.auditLog.auditServerPort }} + {{- end }} + {{- if .Values.secure.enabled }} + enabled: true + commandlines_capture: + enabled: true + memdump: + enabled: true + {{- end }} +{{- end }} +{{- $disableCaptures := include "get_or_fail_if_in_settings" (dict "root" . "key" "sysdig.disableCaptures" "setting" "sysdig_capture_enabled")}} +{{- if eq $disableCaptures "true" }} + sysdig_capture_enabled: false +{{- end }} +{{- $collectorHost := include "get_or_fail_if_in_settings" (dict "root" . "key" "collectorSettings.collectorHost" "setting" "collector")}} +{{- if $collectorHost }} + collector: {{ $collectorHost }} +{{- end }} +{{- $collectorPort := include "get_or_fail_if_in_settings" (dict "root" . 
"key" "collectorSettings.collectorPort" "setting" "collector_port")}} +{{- if $collectorPort }} + collector_port: {{ $collectorPort }} +{{- end }} +{{- $ssl := include "get_or_fail_if_in_settings" (dict "root" . "key" "collectorSettings.ssl" "setting" "ssl")}} +{{- if $ssl }} + ssl: {{ $ssl }} +{{- end }} +{{- $sslVerifyCertificate := include "get_or_fail_if_in_settings" (dict "root" . "key" "collectorSettings.sslVerifyCertificate" "setting" "ssl_verify_certificate")}} +{{- if $sslVerifyCertificate }} + ssl_verify_certificate: {{ $sslVerifyCertificate }} +{{- end }} +{{- if .Values.sysdig.settings }} +{{ toYaml .Values.sysdig.settings | indent 4 }} +{{- end }} +{{- if .Values.prometheus.file }} + prometheus.yaml: | +{{ toYaml .Values.prometheus.yaml | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/sysdig/sysdig/1.9.200/templates/daemonset-image-analyzer.yaml b/charts/sysdig/sysdig/1.9.200/templates/daemonset-image-analyzer.yaml new file mode 100644 index 000000000..ae9888bbf --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/daemonset-image-analyzer.yaml @@ -0,0 +1,148 @@ +{{- if .Values.nodeImageAnalyzer.deploy }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "sysdig.fullname" . }}-image-analyzer + labels: + app.kubernetes.io/name: {{ include "sysdig.name" . }}-image-analyzer +{{ include "sysdig.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "sysdig.name" . }}-image-analyzer + app.kubernetes.io/instance: {{ .Release.Name }} + updateStrategy: + type: RollingUpdate + template: + metadata: + name: {{ template "sysdig.fullname" . }}-image-analyzer + labels: + app.kubernetes.io/name: {{ include "sysdig.name" . }}-image-analyzer +{{ include "sysdig.labels" . | indent 8 }} + spec: + volumes: + # Needed for cri-o image inspection. + # cri-o and especially OCP 4.x by default use containers/storage to handle images, and this makes sure that the + # analyzer has access to the configuration. This file is mounted read-only. + - name: etc-containers-storage-vol + hostPath: + path: /etc/containers/storage.conf + # Needed for cri-o image inspection. + # This is the directory where image data is stored by default when using cri-o and OCP 4.x and the analyzer + # uses it to get the data to scan. This directory must be mounted r/w because proper access to its files through + # the containers/storage library is always regulated with a lockfile. + - name: var-lib-containers-vol + hostPath: + path: /var/lib/containers + # Needed for socket access + - name: varrun-vol + hostPath: + path: /var/run + # Add custom volume here + - name: sysdig-image-analyzer-config + configMap: + name: {{ template "sysdig.fullname" . }}-image-analyzer + optional: true + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + # The following line is necessary for RBAC + serviceAccount: {{ template "sysdig.serviceAccountName" .}} + {{- if .Values.nodeImageAnalyzer.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.nodeImageAnalyzer.image.pullSecrets | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 5 + containers: + - name: sysdig-image-analyzer + image: {{ template "sysdig.image.nodeImageAnalyzer" . }} + securityContext: + # The privileged flag is necessary for OCP 4.x and other Kubernetes setups that deny host filesystem access to + # running containers by default regardless of volume mounts. In those cases, access to the CRI socket would fail. 
+ privileged: true + imagePullPolicy: {{ .Values.nodeImageAnalyzer.image.pullPolicy }} + resources: +{{ toYaml .Values.nodeImageAnalyzer.resources | indent 10 }} + volumeMounts: + - mountPath: /var/run + name: varrun-vol + - mountPath: /etc/containers/storage.conf + name: etc-containers-storage-vol + readOnly: true + - mountPath: /var/lib/containers + name: var-lib-containers-vol + env: + - name: ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "sysdig.fullname" . }} + key: access-key + - name: IMAGE_PERIOD + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: image_period + optional: true + - name: IMAGE_CACHE_TTL + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: image_cache_ttl + optional: true + - name: REPORT_PERIOD + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: report_period + optional: true + - name: DOCKER_SOCKET_PATH + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: docker_socket_path + optional: true + - name: CRI_SOCKET_PATH + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: cri_socket_path + optional: true + #TODO: Get from agent config instead? + - name: AM_COLLECTOR_ENDPOINT + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: collector_endpoint + optional: true + #TODO: Get from agent config instead? + - name: AM_COLLECTOR_TIMEOUT + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: collector_timeout + optional: true + - name: CHECK_CERTIFICATE + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: check_certificate + optional: true + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: K8S_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DEBUG + valueFrom: + configMapKeyRef: + name: {{ template "sysdig.fullname" . }}-image-analyzer + key: debug + optional: true +{{- end }} \ No newline at end of file diff --git a/charts/sysdig/sysdig/1.9.200/templates/daemonset.yaml b/charts/sysdig/sysdig/1.9.200/templates/daemonset.yaml new file mode 100644 index 000000000..2e1447f92 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/daemonset.yaml @@ -0,0 +1,227 @@ +{{- if .Values.sysdig.accessKey }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "sysdig.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "sysdig.name" . }} +{{ include "sysdig.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "sysdig.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: {{ template "sysdig.fullname" .}} + labels: + app.kubernetes.io/name: {{ include "sysdig.name" . }} +{{ include "sysdig.labels" . 
| indent 8 }} + {{- if .Values.daemonset.annotations }} + annotations: +{{ toYaml .Values.daemonset.annotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "sysdig.serviceAccountName" .}} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + terminationGracePeriodSeconds: 5 + {{- if .Values.daemonset.affinity }} + affinity: +{{ toYaml .Values.daemonset.affinity | indent 8 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.slim.enabled }} + initContainers: + - name: sysdig-agent-kmodule + image: {{ template "sysdig.image.kmodule" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 12 }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + privileged: true + runAsNonRoot: false + runAsUser: 0 + readOnlyRootFilesystem: false + allowPrivilegeEscalation: true + resources: +{{ toYaml .Values.slim.resources | indent 12 }} + volumeMounts: + - mountPath: /etc/modprobe.d + name: modprobe-d + readOnly: true + - mountPath: /host/boot + name: boot-vol + readOnly: true + - mountPath: /host/lib/modules + name: modules-vol + readOnly: true + - mountPath: /host/usr + name: usr-vol + readOnly: true + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "sysdig.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: +{{ toYaml .Values.resources | indent 12 }} + securityContext: + capabilities: + drop: + - ALL + privileged: true + runAsNonRoot: false + runAsUser: 0 + readOnlyRootFilesystem: false + allowPrivilegeEscalation: true + env: + {{- if .Values.ebpf.enabled }} + - name: SYSDIG_BPF_PROBE + value: + {{- end }} + {{- if .Values.proxy.httpProxy }} + - name: http_proxy + value: {{ .Values.proxy.httpProxy }} + {{- end }} + {{- if .Values.proxy.httpsProxy }} + - name: https_proxy + value: {{ .Values.proxy.httpsProxy }} + {{- end }} + {{- if .Values.proxy.noProxy }} + - name: no_proxy + value: {{ .Values.proxy.noProxy }} + {{- end }} + {{- if .Values.timezone }} + - name: TZ + value: {{ .Values.timezone }} + {{- end }} + {{- range $key, $value := .Values.daemonset.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + readinessProbe: + exec: + command: [ "test", "-e", "/opt/draios/logs/running" ] + initialDelaySeconds: 10 + livenessProbe: + exec: + command: [ "test", "-e", "/opt/draios/logs/running" ] + initialDelaySeconds: 10 + volumeMounts: + {{- if not .Values.slim.enabled }} + - mountPath: /etc/modprobe.d + name: modprobe-d + readOnly: true + {{- end }} + - mountPath: /host/dev + name: dev-vol + readOnly: false + - mountPath: /host/proc + name: proc-vol + readOnly: true + {{- if not .Values.slim.enabled }} + - mountPath: /host/boot + name: boot-vol + readOnly: true + - mountPath: /host/lib/modules + name: modules-vol + readOnly: true + - mountPath: /host/usr + name: usr-vol + readOnly: true + {{- end }} + - mountPath: /host/run + name: run-vol + - mountPath: /host/var/run + name: varrun-vol + - mountPath: /dev/shm + name: dshm + - mountPath: /opt/draios/etc/kubernetes/config + name: sysdig-agent-config + - mountPath: /opt/draios/etc/kubernetes/secrets + name: sysdig-agent-secrets + {{- if (and .Values.ebpf.enabled 
.Values.ebpf.settings.mountEtcVolume) }} + - mountPath: /host/etc + name: etc-fs + readOnly: true + {{- end }} + {{- if .Values.customAppChecks }} + - mountPath: /opt/draios/lib/python/checks.custom.d + name: custom-app-checks-volume + {{- end }} + - mountPath: /host/etc/os-release + name: osrel + readOnly: true + {{- if .Values.extraVolumes.mounts }} +{{ toYaml .Values.extraVolumes.mounts | indent 12 }} + {{- end }} + volumes: + - name: modprobe-d + hostPath: + path: /etc/modprobe.d + - name: osrel + hostPath: + path: /etc/os-release + type: FileOrCreate + - name: dshm + emptyDir: + medium: Memory + - name: dev-vol + hostPath: + path: /dev + - name: proc-vol + hostPath: + path: /proc + - name: boot-vol + hostPath: + path: /boot + - name: modules-vol + hostPath: + path: /lib/modules + - name: usr-vol + hostPath: + path: /usr + - name: run-vol + hostPath: + path: /run + - name: varrun-vol + hostPath: + path: /var/run + {{- if (and .Values.ebpf.enabled .Values.ebpf.settings.mountEtcVolume) }} + - name: etc-fs + hostPath: + path: /etc + {{- end }} + - name: sysdig-agent-config + configMap: + name: {{ template "sysdig.fullname" . }} + optional: true + - name: sysdig-agent-secrets + secret: + secretName: {{ template "sysdig.fullname" . }} + {{- if .Values.customAppChecks }} + - name: custom-app-checks-volume + configMap: + name: {{ template "sysdig.fullname" . }}-custom-app-checks + {{- end }} + {{- if .Values.extraVolumes.volumes }} +{{ toYaml .Values.extraVolumes.volumes | indent 8 }} + {{- end }} + updateStrategy: +{{ toYaml .Values.daemonset.updateStrategy | indent 4 }} +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/secrets.yaml b/charts/sysdig/sysdig/1.9.200/templates/secrets.yaml new file mode 100644 index 000000000..7e458160f --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "sysdig.fullname" . }} + labels: +{{ include "sysdig.labels" . | indent 4 }} +type: Opaque +data: + access-key : {{ required "A valid .Values.sysdig.accessKey is required" .Values.sysdig.accessKey | b64enc | quote }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/securitycontextconstraint.yaml b/charts/sysdig/sysdig/1.9.200/templates/securitycontextconstraint.yaml new file mode 100644 index 000000000..e604859b2 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/securitycontextconstraint.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.scc.create (.Capabilities.APIVersions.Has "security.openshift.io/v1") }} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: | + This provides the minimum requirements to the Sysdig agent to run in the Openshift. + name: {{ template "sysdig.fullname" . }} + labels: +{{ include "sysdig.labels" . 
| indent 4 }} +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: true +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: [] +allowedUnsafeSysctls: [] +defaultAddCapabilities: [] +fsGroup: + type: RunAsAny +groups: [] +priority: 0 +readOnlyRootFilesystem: false +requiredDropCapabilities: [] +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- '*' +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:{{ .Release.Namespace }}:{{ template "sysdig.serviceAccountName" .}} +volumes: +- hostPath +- emptyDir +- secret +- configMap +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/service.yaml b/charts/sysdig/sysdig/1.9.200/templates/service.yaml new file mode 100644 index 000000000..ba94d6894 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/service.yaml @@ -0,0 +1,15 @@ +{{- if .Values.auditLog.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "sysdig.fullname" . }} + labels: +{{ include "sysdig.labels" . | indent 4 }} +spec: + selector: + app.kubernetes.io/name: {{ include "sysdig.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ .Values.auditLog.auditServerPort }} +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/templates/serviceaccount.yaml b/charts/sysdig/sysdig/1.9.200/templates/serviceaccount.yaml new file mode 100644 index 000000000..141affa38 --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "sysdig.serviceAccountName" .}} + labels: +{{ include "sysdig.labels" . | indent 4 }} +{{- end }} diff --git a/charts/sysdig/sysdig/1.9.200/values.yaml b/charts/sysdig/sysdig/1.9.200/values.yaml new file mode 100644 index 000000000..fb1e29cbb --- /dev/null +++ b/charts/sysdig/sysdig/1.9.200/values.yaml @@ -0,0 +1,220 @@ +# Default values for Sysdig Monitor and Secure Helm package. + +image: + # This is a hack to support RELATED_IMAGE_ feature in Helm based + # Operators + # + # As long as I don't want to people to use this, I will keep it undocumented + overrideValue: + + registry: docker.io + repository: sysdig/agent + tag: 10.3.0 + # Specify a imagePullPolicy + # Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + # ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + pullPolicy: IfNotPresent + # Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # + # pullSecrets: + # - name: myRegistrKeySecretName + +resources: + # Although resources needed are subjective on the actual workload we provide + # a sane defaults ones. 
If you have more questions or concerns, please refer + # to Sysdig Support for more info about it + requests: + cpu: 600m + memory: 512Mi + limits: + cpu: 2000m + memory: 1536Mi + +rbac: + # true here enables creation of rbac resources + create: true + +scc: + # true here enabled creation of Security Context Constraints in Openshift + create: true + +serviceAccount: + # Create and use serviceAccount resources + create: true + # Use this value as serviceAccountName + name: + +daemonset: + # Perform rolling updates by default in the DaemonSet agent + # ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + updateStrategy: + # You can also customize maxUnavailable, maxSurge or minReadySeconds if you + # need it + type: RollingUpdate + ## Extra environment variables that will be pass onto deployment pods + env: {} + # Allow the DaemonSet to schedule using affinity rules + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - key: beta.kubernetes.io/os + operator: In + values: + - linux + # Allow the DaemonSet to set annotations + annotations: {} + +# If is behind a proxy you can set the proxy server +proxy: + httpProxy: + httpsProxy: + noProxy: + +# Set daemonset timezone +timezone: + +# Set daemonset priorityClassName +priorityClassName: + +ebpf: + # Enable eBPF support for Sysdig Agent + enabled: false + + settings: + # Needed to correctly detect the kernel version for the eBPF program + # Set to false if not running on Google COS + mountEtcVolume: true + +slim: + # Uses a slim version of the Sysdig Agent + enabled: false + # When using slim the kernel module is built in other container, which + # contains the toolchain required to build the kernel module. + kmoduleImage: + repository: sysdig/agent-kmodule + + resources: + # Resources required by the kernel module builder image. These are some + # a sane defaults ones, but you can tweak or ask Sysdig Support for more + # info about this + requests: + cpu: 1000m + memory: 348Mi + limits: + memory: 512Mi + +# For Sysdig On-Prem installations or for custom collector settings, set the following fields +collectorSettings: + collectorHost: + collectorPort: + ssl: + sslVerifyCertificate: + +# Setting a cluster name allows you to filter events from this cluster using kubernetes.cluster.name +clusterName: "" + +sysdig: + # Required: You need your Sysdig Agent access key before running agents. + accessKey: "" + + # Disable capture functionality (see https://docs.sysdig.com/en/disable-captures.html) + disableCaptures: false + + # Advanced settings. 
Any option in here will be directly translated into dragent.yaml in the Configmap + settings: {} + ### Agent tags + # tags: linux:ubuntu,dept:dev,local:nyc + +secure: + # true here enables Sysdig Secure: container run-time security & forensics + enabled: true + +auditLog: + # true here activates the K8s Audit Log feature for Sysdig Secure + enabled: false + auditServerUrl: 0.0.0.0 + auditServerPort: 7765 + + dynamicBackend: + # true here configures an AuditSink who will receive the K8s audit logs + enabled: false + +nodeImageAnalyzer: + deploy: false + image: + repository: sysdig/node-image-analyzer + tag: 0.1.0 + pullPolicy: IfNotPresent + # pullSecrets: + # - name: myRegistrKeySecretName + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 500m + memory: 1024Mi + + # Additional advanced settings + settings: {} + +customAppChecks: {} + # Allow passing custom app checks for Sysdig Agent. + # Example: + # + # sample.py: |- + # from checks import AgentCheck + # + # class MyCustomCheck(AgentCheck): + # def check(self, instance): + # self.gauge("testhelm", 1) + +# Promscrape prometheus.yaml not configured by default +prometheus: + file: false + yaml: {} + +extraVolumes: + volumes: [] + mounts: [] + # Allow passing extra volumes to the agent to mount secrets or certificates + # to authenticate in different services. + # Any kind of volume can be passed. Example: + # + # extraVolumes: + # volumes: + # - name: sysdig-new-cm + # configMap: + # name: my-cm + # optional: true + # - name: sysdig-new-secret + # secret: + # secretName: my-secret + # mounts: + # - mountPath: /opt/draios/cm + # name: sysdig-new-cm + # - mountPath: /opt/draios/secret + # name: sysdig-new-secret + +# Allow sysdig to run on Kubernetes 1.6 masters. +tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/.helmignore b/charts/universal-crossplane/universal-crossplane/1.2.200100/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/Chart.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/Chart.yaml new file mode 100644 index 000000000..d7090fa7d --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/Chart.yaml @@ -0,0 +1,42 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Upbound Universal Crossplane + catalog.cattle.io/release-name: universal-crossplane +apiVersion: v1 +appVersion: 1.2.2001 +description: 'Upbound Universal Crossplane (UXP) is Upbound''s official enterprise-grade + distribution of Crossplane. It''s fully compatible with upstream Crossplane, open + source, capable of connecting to Upbound Cloud for real-time dashboard visibility, + and maintained by Upbound. It''s the easiest way for both individual community members + and enterprises to build their production control planes. 
' +home: https://upbound.io +icon: https://raw.githubusercontent.com/upbound/universal-crossplane/66ce9eb2c5a0c3af8ed7d19551a2c4d743b933b9/docs/media/logo.png +keywords: +- cloud +- infrastructure +- services +- application +- database +- cache +- bucket +- infra +- app +- ops +- oam +- gcp +- azure +- aws +- alibaba +- cloudsql +- rds +- s3 +- azuredatabase +- asparadb +- gke +- aks +- eks +maintainers: +- email: info@upbound.io + name: Upbound Inc. +name: universal-crossplane +version: 1.2.200100 diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/app-readme.md b/charts/universal-crossplane/universal-crossplane/1.2.200100/app-readme.md new file mode 100644 index 000000000..c6d8ecfe0 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/app-readme.md @@ -0,0 +1,36 @@ +# Upbound Universal Crossplane (UXP) + +Upbound Universal Crossplane (UXP) is [Upbound's](https://upbound.io) official enterprise-grade distribution of [Crossplane](https://crossplane.io). It's fully compatible with upstream Crossplane, [open source](https://github.com/upbound/universal-crossplane), capable of connecting to [Upbound Cloud](https://cloud.upbound.io) for real-time dashboard visibility, and maintained by Upbound. It's the easiest way for both individual community members and enterprises to build their production control planes. + +## Connecting to Upbound Cloud + +You can optionally connect your Universal Crossplane instance to Upbound Cloud. +Follow the steps below to connect your Universal Crossplane cluster to your Upbound Cloud Console. + +1. Install Upbound CLI + + You will need to make sure you have the Upbound CLI installed before you continue. If you need more information on how to install the Upbound CLI, you can read the [Installing Upbound CLI Documentation](https://cloud.upbound.io/docs/cli). + + ``` + curl -sL https://cli.upbound.io | sh + ``` + +2. Log in to Upbound Cloud + + ``` + up cloud login --profile=rancher --account=$UPBOUND_ACCOUNT + ``` + + Or, to log in using an Upbound [API token](https://cloud.upbound.io/account/settings/tokens): + + ``` + up cloud login --profile=rancher --account=$UPBOUND_ACCOUNT --token=$API_TOKEN + ``` + +3. Create a Self-Hosted Control Plane + + ``` + up cloud controlplane attach $CONTROL_PLANE_NAME --profile=rancher + ``` + +4. 
Provide the token obtained in the previous step as `upbound.controlPlane.token` under `Upbound Cloud` section \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/questions.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/questions.yaml new file mode 100644 index 000000000..c5cb628bf --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/questions.yaml @@ -0,0 +1,184 @@ +questions: +# Upbound Cloud configuration +- variable: upbound.controlPlane.token + label: upbound.controlPlane.token + required: false + type: password + description: Token used to connect Upbound Cloud + group: "Upbound Cloud" +- variable: upbound.controlPlane.permission + label: upbound.controlPlane.permission + required: false + type: enum + default: "edit" + options: + - "edit" + - "view" + description: Cluster permissions for Upbound Cloud + group: "Upbound Cloud" +# Basic Crossplane configuration +- variable: replicas + label: replicas + description: Number of replicas to run for Crossplane pods + type: int + default: 1 + required: true + group: "Crossplane" +# Advanced Crossplane configuration +- variable: advancedCrossplaneConfiguration + description: View advanced configuration settings + label: View advanced configuration + type: boolean + default: false + show_subquestion_if: true + group: "Crossplane" + subquestions: + - variable: leaderElection + label: leaderElection + description: "Enable leader election for Crossplane Managers pod" + type: boolean + default: true + required: false + group: "Crossplane" + - variable: deploymentStrategy + label: deploymentStrategy + description: "The deployment strategy for the Crossplane and RBAC Manager (if enabled) pods" + type: enum + default: "RollingUpdate" + options: + - "RollingUpdate" + - "Recreate" + required: true + group: "Crossplane" + - variable: priorityClassName + label: priorityClassName + description: "Priority class name for Crossplane and RBAC Manager (if enabled) pods" + type: string + required: false + group: "Crossplane" + - variable: metrics.enabled + label: metrics.enabled + description: "Expose Crossplane and RBAC Manager metrics endpoint" + type: boolean + required: false + group: "Crossplane" +# Basic Crossplane RBAC Manager configuration +- variable: rbacManager.deploy + label: rbacManager.deploy + description: "Deploy RBAC Manager" + type: boolean + default: true + required: true + group: "Crossplane RBAC Manager" +- variable: rbacManager.replicas + label: rbacManager.replicas + description: "The number of replicas to run for the RBAC Manager pods" + type: int + default: 1 + required: true + group: "Crossplane RBAC Manager" +# Advanced Crossplane RBAC Manager configuration +- variable: advancedRBACManagerConfiguration + description: View advanced configuration settings + label: View advanced configuration + type: boolean + default: false + show_subquestion_if: true + group: "Crossplane RBAC Manager" + subquestions: + - variable: rbacManager.leaderElection + label: rbacManager.leaderElection + description: "Enable leader election for RBAC Managers pod" + type: boolean + default: true + group: "Crossplane RBAC Manager" + - variable: rbacManager.managementPolicy + label: rbacManager.managementPolicy + description: RBAC manager permissions. 'All' enables management for every Crossplane controller and user role. 'Basic' enables management just for Crossplane controller roles and the crossplane-admin, crossplane-edit, and crossplane-view user roles. 
+ type: enum + default: "Basic" + options: + - "Basic" + - "All" + required: true + group: "Crossplane RBAC Manager" + - variable: rbacManager.skipAggregatedClusterRoles + label: rbacManager.skipAggregatedClusterRoles + description: "Opt out of deploying aggregated ClusterRoles" + type: boolean + default: true + group: "Crossplane RBAC Manager" +# Basic Package configuration +- variable: provider.packages + label: provider.packages + description: List of Provider packages to install with Crossplane. Select 'Edit as YAML' for the best editing experience. + type: string + required: false + group: "Packages" +- variable: configuration.packages + label: configuration.packages + description: List of Configuration packages to install with Crossplane. Select 'Edit as YAML' for the best editing experience. + type: string + required: false + group: "Packages" +# Advanced Package configuration +- variable: advancedPackageConfiguration + description: View advanced configuration settings + label: View advanced configuration + type: boolean + default: false + show_subquestion_if: true + group: "Packages" + subquestions: + - variable: packageCache.sizeLimit + label: packageCache.sizeLimit + description: "Size limit for package cache. If medium is Memory then maximum usage would be the minimum of this value the sum of all memory limits on containers in the Crossplane pod" + type: string + default: "5Mi" + group: "Packages" + - variable: packageCache.medium + label: packageCache.medium + description: "Storage medium for package cache. Memory means volume will be backed by tmpfs, which can be useful for development" + type: string + group: "Packages" + - variable: packageCache.pvc + label: packageCache.pvc + description: "Name of the PersistentVolumeClaim to be used as the package cache. Providing a value will cause the default emptyDir volume to not be mounted" + type: string + group: "Packages" +# Basic XGQL configuration +- variable: xgql.config.debugMode + label: xgql.config.debugMode + description: "Enable debug mode for XGQL" + type: boolean + default: false + group: "XGQL" +# Advanced Crossplane configuration +- variable: advancedXGQLConfiguration + description: View advanced configuration settings + label: View advanced configuration + type: boolean + default: false + show_subquestion_if: true + group: "XGQL" + subquestions: + - variable: xgql.metrics.enabled + label: xgql.metrics.enabled + description: "Expose XGQL metrics endpoint" + type: boolean + required: false + group: "XGQL" +# Basic Agent configuration +- variable: agent.config.debugMode + label: agent.config.debugMode + description: "Enable debug mode for Upbound Agent" + type: boolean + default: false + group: "Upbound Agent" +# Basic Bootstrapper configuration +- variable: bootstrapper.config.debugMode + label: bootstrapper.config.debugMode + description: "Enable debug mode for Bootstrapper" + type: boolean + default: false + group: "Bootstrapper" \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/NOTES.txt b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/NOTES.txt new file mode 100644 index 000000000..33260c04f --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/NOTES.txt @@ -0,0 +1,15 @@ +By proceeding, you are accepting to comply with terms and conditions in https://licenses.upbound.io/upbound-software-license.html + +✨ Thank you for installing Universal Crossplane! 
+{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +🚀 You can now connect your cluster to Upbound Cloud! + +Example command: +{{ if eq .Values.upbound.controlPlane.permission "edit" }} +$ up cloud controlplane attach | \ +up uxp connect --token-secret-name {{ .Values.upbound.controlPlane.tokenSecretName }} --namespace {{ .Release.Namespace }} - +{{- else if eq .Values.upbound.controlPlane.permission "view" }} +$ up cloud controlplane attach --view-only | \ +up uxp connect --token-secret-name {{ .Values.upbound.controlPlane.tokenSecretName }} --namespace {{ .Release.Namespace }} - +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/_helpers.tpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/_helpers.tpl new file mode 100644 index 000000000..7ba5d8058 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/_helpers.tpl @@ -0,0 +1,21 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Common labels +*/}} +{{- define "labels" -}} +helm.sh/chart: {{ include "chart" . }} +{{ include "selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "selectorLabels" -}} +app.kubernetes.io/name: {{ include "name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/_helpers.tpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/_helpers.tpl new file mode 100644 index 000000000..bdca1ae09 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/_helpers.tpl @@ -0,0 +1,21 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "bootstrapper-name" -}} +{{- "upbound-bootstrapper" -}} +{{- end -}} + +{{/* +Labels - bootstrapper +*/}} +{{- define "labelsBootstrapper" -}} +{{ include "labels" . }} +app.kubernetes.io/component: bootstrapper +{{- end }} + +{{/* +Selector labels - bootstrapper +*/}} +{{- define "selectorLabelsBootstrapper" -}} +{{ include "selectorLabels" . }} +app.kubernetes.io/component: bootstrapper +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrole.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrole.yaml new file mode 100644 index 000000000..162abdd7a --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrole.yaml @@ -0,0 +1,26 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "bootstrapper-name" . }} + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +rules: + # Bootstrapper needs to identify the cluster uniquely and it does that by using + # UID of kube-system namespace. + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "kube-system" + verbs: + - "get" + # Controller-runtime requires watch and list permissions to build its resource + # cache of the kind that any client query is made for. 
+ - apiGroups: + - "" + resources: + - namespaces + verbs: + - "list" + - "watch" diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrolebinding.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrolebinding.yaml new file mode 100644 index 000000000..33fd634cd --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "bootstrapper-name" . }} + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "bootstrapper-name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "bootstrapper-name" . }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/deployment.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/deployment.yaml new file mode 100644 index 000000000..14dd4335e --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "bootstrapper-name" . }} + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "selectorLabelsBootstrapper" . | nindent 6 }} + template: + metadata: + labels: + {{- include "selectorLabelsBootstrapper" . | nindent 8 }} + spec: + serviceAccountName: {{ template "bootstrapper-name" . }} + {{- if .Values.billing.awsMarketplace.enabled }} + securityContext: + # Providing this is not required for 1.19 or later clusters. + # See https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html + fsGroup: 1337 + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range $index, $secret := .Values.imagePullSecrets }} + - name: {{ $secret }} + {{- end }} + {{ end }} + containers: + - name: bootstrapper + image: "{{ .Values.bootstrapper.image.repository }}:{{ .Values.bootstrapper.image.tag }}" + args: + - start + - --namespace + - {{ .Release.Namespace }} + - --upbound-api-url + - {{ .Values.upbound.apiURL }} + - --controller + - tls-secrets + {{- if .Values.billing.awsMarketplace.enabled }} + - --controller + - aws-marketplace + {{- end }} + {{- if .Values.bootstrapper.config.debugMode }} + - "--debug" + {{- end }} + {{- range $arg := .Values.bootstrapper.config.args }} + - {{ $arg }} + {{- end }} + imagePullPolicy: {{ .Values.bootstrapper.image.pullPolicy }} + resources: + {{- toYaml .Values.bootstrapper.resources | nindent 12 }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/role.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/role.yaml new file mode 100644 index 000000000..e14c58f58 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "bootstrapper-name" . }} + labels: + {{- include "labelsBootstrapper" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["watch", "list"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update", "patch"] + resourceNames: + - uxp-ca + - upbound-agent-public-certs + - upbound-agent-tls + - xgql-tls + {{- if .Values.billing.awsMarketplace.enabled }} + - upbound-entitlement + {{- end}} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/rolebinding.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/rolebinding.yaml new file mode 100644 index 000000000..aa41bb33a --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/rolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "bootstrapper-name" . }} + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "bootstrapper-name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "bootstrapper-name" . }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/secret-entitlement.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/secret-entitlement.yaml new file mode 100644 index 000000000..7e311b629 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/secret-entitlement.yaml @@ -0,0 +1,9 @@ +{{- if .Values.billing.awsMarketplace.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: upbound-entitlement + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +type: Opaque +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/serviceaccount.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/serviceaccount.yaml new file mode 100644 index 000000000..1768272a3 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "bootstrapper-name" . }} + {{- if and .Values.billing.awsMarketplace.enabled .Values.billing.awsMarketplace.iamRoleARN }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.billing.awsMarketplace.iamRoleARN | quote }} + {{- end }} + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/uxp-ca-tls-secret.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/uxp-ca-tls-secret.yaml new file mode 100644 index 000000000..07163971e --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/uxp-ca-tls-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: uxp-ca + labels: + {{- include "labels" . 
| nindent 4 }} +type: Opaque diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/versions-configmap.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/versions-configmap.yaml new file mode 100644 index 000000000..1eacb8d16 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/bootstrapper/versions-configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: universal-crossplane-config + labels: + {{- include "labelsBootstrapper" . | nindent 4 }} +data: + crossplaneVersion: {{ (trimPrefix "v" .Values.image.tag) }} + xgqlVersion: {{ (trimPrefix "v" .Values.xgql.image.tag) }} + agentVersion: {{ (trimPrefix "v" .Values.agent.image.tag) }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/NOTES.txt b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/NOTES.txt new file mode 100644 index 000000000..f1c8a0c63 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/NOTES.txt @@ -0,0 +1,8 @@ +Release: {{.Release.Name}} + +Chart Name: {{.Chart.Name}} +Chart Description: {{.Chart.Description}} +Chart Version: {{.Chart.Version}} +Chart Application Version: {{.Chart.AppVersion}} + +Kube Version: {{.Capabilities.KubeVersion}} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/_helpers.tpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/_helpers.tpl new file mode 100644 index 000000000..921e9df26 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/_helpers.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrole.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrole.yaml new file mode 100644 index 000000000..8a6b573cc --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrole.yaml @@ -0,0 +1,93 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }} + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-crossplane: "true" +rules: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:system:aggregate-to-crossplane + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + crossplane.io/scope: "system" + rbac.crossplane.io/aggregate-to-crossplane: "true" +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch + - delete +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - "*" +- apiGroups: + - apiextensions.crossplane.io + - pkg.crossplane.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - extensions + - apps + resources: + - deployments + verbs: + - get + - list + - create + - update + - patch + - delete + - watch +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - get + - list + - create + - update + - patch + - watch + - delete diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrolebinding.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrolebinding.yaml new file mode 100644 index 000000000..d0fb877c2 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "name" . }} + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "name" . }} +subjects: +- kind: ServiceAccount + name: {{ template "name" . }} + namespace: {{ .Release.Namespace }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/deployment.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/deployment.yaml new file mode 100644 index 000000000..b51ab99ad --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/deployment.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "name" . }} + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "name" . }} + release: {{ .Release.Name }} + strategy: + type: {{ .Values.deploymentStrategy }} + template: + metadata: + {{- if .Values.metrics.enabled }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "8080" + prometheus.io/scrape: "true" + {{- end }} + labels: + app: {{ template "name" . }} + release: {{ .Release.Name }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + serviceAccountName: {{ template "name" . 
}} + initContainers: + - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + args: + - core + - init + {{- range $arg := .Values.provider.packages }} + - --provider + - "{{ $arg }}" + {{- end }} + {{- range $arg := .Values.configuration.packages }} + - --configuration + - "{{ $arg }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }}-init + resources: + {{- toYaml .Values.resourcesCrossplane | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContextCrossplane | nindent 12 }} + containers: + - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + args: + - core + - start + {{- range $arg := .Values.args }} + - {{ $arg }} + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }} + resources: + {{- toYaml .Values.resourcesCrossplane | nindent 12 }} + {{- if .Values.metrics.enabled }} + ports: + - name: metrics + containerPort: 8080 + {{- end }} + securityContext: + {{- toYaml .Values.securityContextCrossplane | nindent 12 }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LEADER_ELECTION + value: "{{ .Values.leaderElection }}" + {{- range $key, $value := .Values.extraEnvVarsCrossplane }} + - name: {{ $key | replace "." "_" }} + value: {{ $value | quote }} + {{- end}} + volumeMounts: + - mountPath: /cache + name: package-cache + volumes: + - name: package-cache + {{- if .Values.packageCache.pvc }} + persistentVolumeClaim: + claimName: {{ .Values.packageCache.pvc }} + {{- else }} + emptyDir: + medium: {{ .Values.packageCache.medium }} + sizeLimit: {{ .Values.packageCache.sizeLimit }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{ toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{ toYaml .Values.affinity | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-allowed-provider-permissions.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-allowed-provider-permissions.yaml new file mode 100644 index 000000000..3b6ce2270 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-allowed-provider-permissions.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbacManager.deploy }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:allowed-provider-permissions + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-allowed-provider-permissions: "true" +{{- end}} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrole.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrole.yaml new file mode 100644 index 000000000..de8478697 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrole.yaml @@ -0,0 +1,94 @@ +{{- if .Values.rbacManager.deploy }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . 
}}-rbac-manager + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - namespaces + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.crossplane.io + resources: + - compositeresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - pkg.crossplane.io + resources: + - providerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - roles + verbs: + - get + - list + - watch + - create + - update + - patch + # The RBAC manager may grant access it does not have. + - escalate +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - bind +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - "*" +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - get + - list + - create + - update + - patch + - watch + - delete +{{- end}} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrolebinding.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrolebinding.yaml new file mode 100644 index 000000000..bda467f24 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbacManager.deploy }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "name" . }}-rbac-manager + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "name" . }}-rbac-manager +subjects: +- kind: ServiceAccount + name: rbac-manager + namespace: {{ .Release.Namespace }} +{{- end}} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-deployment.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-deployment.yaml new file mode 100644 index 000000000..110a5285d --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-deployment.yaml @@ -0,0 +1,85 @@ +{{- if .Values.rbacManager.deploy }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "name" . }}-rbac-manager + labels: + app: {{ template "name" . }}-rbac-manager + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.rbacManager.replicas }} + selector: + matchLabels: + app: {{ template "name" . }}-rbac-manager + release: {{ .Release.Name }} + strategy: + type: {{ .Values.deploymentStrategy }} + template: + metadata: + {{- if .Values.metrics.enabled }} + annotations: + prometheus.io/path: /metrics + prometheus.io/port: "8080" + prometheus.io/scrape: "true" + {{- end }} + labels: + app: {{ template "name" . 
}}-rbac-manager + release: {{ .Release.Name }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + serviceAccountName: rbac-manager + initContainers: + - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + args: + - rbac + - init + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }}-init + resources: + {{- toYaml .Values.resourcesRBACManager | nindent 12 }} + securityContext: + {{- toYaml .Values.securityContextRBACManager | nindent 12 }} + containers: + - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + args: + - rbac + - start + {{- if .Values.rbacManager.managementPolicy }} + - --manage={{ .Values.rbacManager.managementPolicy }} + {{- end }} + {{- range $arg := .Values.rbacManager.args }} + - {{ $arg }} + {{- end }} + - --provider-clusterrole={{ template "name" .}}:allowed-provider-permissions + imagePullPolicy: {{ .Values.image.pullPolicy }} + name: {{ .Chart.Name }} + resources: + {{- toYaml .Values.resourcesRBACManager | nindent 12 }} + {{- if .Values.metrics.enabled }} + ports: + - name: metrics + containerPort: 8080 + {{- end }} + securityContext: + {{- toYaml .Values.securityContextRBACManager | nindent 12 }} + env: + - name: LEADER_ELECTION + value: "{{ .Values.rbacManager.leaderElection }}" + {{- range $key, $value := .Values.extraEnvVarsRBACManager }} + - name: {{ $key | replace "." "_" }} + value: {{ $value | quote }} + {{- end}} + {{- if .Values.rbacManager.nodeSelector }} + nodeSelector: {{ toYaml .Values.rbacManager.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.rbacManager.tolerations }} + tolerations: {{ toYaml .Values.rbacManager.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.rbacManager.affinity }} + affinity: {{ toYaml .Values.rbacManager.affinity | nindent 8 }} + {{- end }} +{{- end}} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-managed-clusterroles.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-managed-clusterroles.yaml new file mode 100644 index 000000000..3d41fb9b5 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-managed-clusterroles.yaml @@ -0,0 +1,279 @@ +{{- if .Values.rbacManager.deploy }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "name" . }}-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "name" . }}-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ template "name" . }}:masters +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}-admin + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-admin: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}-edit + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-edit: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . 
}}-view + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-view: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}-browse + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.crossplane.io/aggregate-to-browse: "true" +{{- if not .Values.rbacManager.skipAggregatedClusterRoles }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-admin + labels: + rbac.crossplane.io/aggregate-to-admin: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane administrators have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane administrators must create provider credential secrets, and may +# need to read or otherwise interact with connection secrets. They may also need +# to create or annotate namespaces. +- apiGroups: [""] + resources: [secrets, namespaces] + verbs: ["*"] +# Crossplane administrators have access to view the roles that they may be able +# to grant to other subjects. +- apiGroups: [rbac.authorization.k8s.io] + resources: [clusterroles, roles] + verbs: [get, list, watch] +# Crossplane administrators have access to grant the access they have to other +# subjects. +- apiGroups: [rbac.authorization.k8s.io] + resources: [clusterrolebindings, rolebindings] + verbs: ["*"] +# Crossplane administrators have full access to built in Crossplane types. +- apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: ["*"] +- apiGroups: + - pkg.crossplane.io + resources: [providers, configurations, providerrevisions, configurationrevisions] + verbs: ["*"] +# Crossplane administrators have access to view CRDs in order to debug XRDs. +- apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [get, list, watch] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-edit + labels: + rbac.crossplane.io/aggregate-to-edit: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane editors have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane editors must create provider credential secrets, and may need to +# read or otherwise interact with connection secrets. +- apiGroups: [""] + resources: [secrets] + verbs: ["*"] +# Crossplane editors may see which namespaces exist, but not edit them. +- apiGroups: [""] + resources: [namespaces] + verbs: [get, list, watch] +# Crossplane editors have full access to built in Crossplane types. +- apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: ["*"] +- apiGroups: + - pkg.crossplane.io + resources: [providers, configurations, providerrevisions, configurationrevisions] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . 
}}:aggregate-to-view + labels: + rbac.crossplane.io/aggregate-to-view: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane viewers have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane viewers may see which namespaces exist. +- apiGroups: [""] + resources: [namespaces] + verbs: [get, list, watch] +# Crossplane viewers have read-only access to built in Crossplane types. +- apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: [get, list, watch] +- apiGroups: + - pkg.crossplane.io + resources: [providers, configurations, providerrevisions, configurationrevisions] + verbs: [get, list, watch] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-browse + labels: + rbac.crossplane.io/aggregate-to-browse: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane browsers have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane browsers have read-only access to compositions and XRDs. This +# allows them to discover and select an appropriate composition when creating a +# resource claim. +- apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: [get, list, watch] +{{- if .Values.rbacManager.managementPolicy }} +--- +# The below ClusterRoles are aggregated to the namespaced RBAC roles created by +# the Crossplane RBAC manager when it is running in --manage=All mode. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-ns-admin + labels: + rbac.crossplane.io/aggregate-to-ns-admin: "true" + rbac.crossplane.io/base-of-ns-admin: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane namespace admins have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane namespace admins may need to read or otherwise interact with +# resource claim connection secrets. +- apiGroups: [""] + resources: [secrets] + verbs: ["*"] +# Crossplane namespace admins have access to view the roles that they may be +# able to grant to other subjects. +- apiGroups: [rbac.authorization.k8s.io] + resources: [roles] + verbs: [get, list, watch] +# Crossplane namespace admins have access to grant the access they have to other +# subjects. +- apiGroups: [rbac.authorization.k8s.io] + resources: [rolebindings] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-ns-edit + labels: + rbac.crossplane.io/aggregate-to-ns-edit: "true" + rbac.crossplane.io/base-of-ns-edit: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane namespace editors have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +# Crossplane namespace editors may need to read or otherwise interact with +# resource claim connection secrets. +- apiGroups: [""] + resources: [secrets] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . 
}}:aggregate-to-ns-view + labels: + rbac.crossplane.io/aggregate-to-ns-view: "true" + rbac.crossplane.io/base-of-ns-view: "true" + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +# Crossplane namespace viewers have access to view events. +- apiGroups: [""] + resources: [events] + verbs: [get, list, watch] +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-serviceaccount.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-serviceaccount.yaml new file mode 100644 index 000000000..dfefe4050 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/rbac-manager-serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.rbacManager.deploy }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rbac-manager + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- end}} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/serviceaccount.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/serviceaccount.yaml new file mode 100644 index 000000000..d3d47223c --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/crossplane/serviceaccount.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "name" . }} + labels: + app: {{ template "name" . }} + chart: {{ template "chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range $index, $secret := .Values.imagePullSecrets }} +- name: {{ $secret }} +{{- end }} +{{ end }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/_helpers.tpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/_helpers.tpl new file mode 100644 index 000000000..4db04bfaa --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/_helpers.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "agent-name" -}} +{{- "upbound-agent" -}} +{{- end -}} + +{{/* +Labels - agent +*/}} +{{- define "labelsAgent" -}} +{{ include "labels" . }} +app.kubernetes.io/component: agent +{{- end }} + +{{/* +Selector labels - agent +*/}} +{{- define "selectorLabelsAgent" -}} +{{ include "selectorLabels" . }} +app.kubernetes.io/component: agent +{{- end }} + diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrole.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrole.yaml new file mode 100644 index 000000000..9dc24441e --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrole.yaml @@ -0,0 +1,40 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "agent-name" . }} + labels: + {{- include "labelsAgent" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["namespaces"] + resourceNames: ["kube-system"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "agent-name" . }}-impersonator + labels: + {{- include "labelsAgent" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["users"] + verbs: ["impersonate"] + resourceNames: ["upbound-cloud-impersonator"] + - apiGroups: ["authentication.k8s.io"] + resources: ["userextras/upbound-id"] + verbs: ["impersonate"] + - apiGroups: [""] + resources: ["groups"] + resourceNames: + # system:authenticated is required for calls to discovery API. Some Kubernetes + # clients like kubectl use it to figure out exactly which endpoints to call + # for given arguments. + - "system:authenticated" + - "upbound:view" +{{- if eq .Values.upbound.controlPlane.permission "edit" }} + - "upbound:edit" +{{- end }} + verbs: ["impersonate"] +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings-managed.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings-managed.yaml new file mode 100644 index 000000000..66e157620 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings-managed.yaml @@ -0,0 +1,34 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "name" . }}-view + labels: + {{- include "labelsAgent" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "name" . }}-view +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: upbound:view +{{- if eq .Values.upbound.controlPlane.permission "edit" }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "name" . }}-edit + labels: + {{- include "labelsAgent" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "name" . }}-edit +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: upbound:edit +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings.yaml new file mode 100644 index 000000000..ec99faf43 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterrolebindings.yaml @@ -0,0 +1,31 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "agent-name" . }} + labels: + {{- include "labelsAgent" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "agent-name" . }} +subjects: +- kind: ServiceAccount + name: {{ template "agent-name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "agent-name" . }}-impersonator + labels: + {{- include "labelsAgent" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "agent-name" . }}-impersonator +subjects: + - kind: ServiceAccount + name: {{ template "agent-name" . }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterroles-managed.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterroles-managed.yaml new file mode 100644 index 000000000..fcca427a0 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/clusterroles-managed.yaml @@ -0,0 +1,66 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +# There are more permissions in upstream aggregated ClusterRoles than we'd like +# to have, so, we have our own ClusterRoles with only the permissions we need. +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-view + labels: + rbac.crossplane.io/aggregate-to-view: "true" + {{- include "labelsAgent" . | nindent 4 }} +rules: + # Universal Crossplane viewers have access to view events. + - apiGroups: [""] + resources: [events] + verbs: [get, list, watch] + # Universal Crossplane viewers may see which namespaces exist. + - apiGroups: [""] + resources: [namespaces] + verbs: [get, list, watch] + # Universal Crossplane viewers may see CRDs installed in the cluster. + - apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [get, list, watch] + # Universal Crossplane viewers have read-only access to built in Crossplane types. + - apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: [get, list, watch] + - apiGroups: + - pkg.crossplane.io + resources: [providers, configurations, providerrevisions, configurationrevisions] + verbs: [get, list, watch] +{{- if eq .Values.upbound.controlPlane.permission "edit" }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "name" . }}:aggregate-to-edit + labels: + rbac.crossplane.io/aggregate-to-edit: "true" + {{- include "labelsAgent" . | nindent 4 }} +rules: + # Universal Crossplane editors have access to view events. + - apiGroups: [""] + resources: [events] + verbs: [get, list, watch] + # Universal Crossplane editors may see which namespaces exist, but not edit them. + - apiGroups: [""] + resources: [namespaces] + verbs: [get, list, watch] + # Universal Crossplane editors may see CRDs installed in the cluster. + - apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [get, list, watch] + # Universal Crossplane editors have full access to built in Crossplane types. 
+ - apiGroups: + - apiextensions.crossplane.io + resources: ["*"] + verbs: ["*"] + - apiGroups: + - pkg.crossplane.io + resources: [providers, configurations, providerrevisions, configurationrevisions] + verbs: ["*"] +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/control-plane-token-secret.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/control-plane-token-secret.yaml new file mode 100644 index 000000000..897846ae7 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/control-plane-token-secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.upbound.controlPlane.token }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.upbound.controlPlane.tokenSecretName }} + labels: + {{- include "labels" . | nindent 4 }} +type: Opaque +data: + token: {{ .Values.upbound.controlPlane.token | b64enc | quote }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/deployment.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/deployment.yaml new file mode 100644 index 000000000..c8cfd788a --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/deployment.yaml @@ -0,0 +1,103 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "agent-name" . }} + labels: + {{- include "labelsAgent" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "selectorLabelsAgent" . | nindent 6 }} + template: + metadata: + labels: + {{- include "selectorLabelsAgent" . | nindent 8 }} + spec: + serviceAccountName: {{ template "agent-name" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range $index, $secret := .Values.imagePullSecrets }} + - name: {{ $secret }} + {{- end }} + {{ end }} + containers: + - name: agent + image: "{{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }}" + args: + - agent + - --tls-cert-file + - /etc/certs/upbound-agent/tls.crt + - --tls-key-file + - /etc/certs/upbound-agent/tls.key + - --xgql-ca-bundle-file + - /etc/certs/upbound-agent/ca.crt + - --nats-endpoint + - nats://{{ .Values.upbound.connectHost }}:{{ .Values.upbound.connectPort | default "443" }} + - --upbound-api-endpoint + - {{ .Values.upbound.apiURL }} + - --pod-name + - $(POD_NAME) + - --control-plane-token-path + - /etc/tokens/control-plane/token + {{- if .Values.agent.config.debugMode }} + - "--debug" + {{- end }} + {{- range $arg := .Values.agent.config.args }} + - {{ $arg }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + imagePullPolicy: {{ .Values.agent.image.pullPolicy }} + ports: + - name: agent + containerPort: 6443 + protocol: TCP + resources: + {{- toYaml .Values.agent.resources | nindent 12 }} +# TODO(muvaf): Disabled temporarily since we'd like to complete the installation +# even if the control plane token is not there, which makes these probes return +# false. 
+# readinessProbe: +# httpGet: +# scheme: HTTPS +# path: /readyz +# port: 6443 +# initialDelaySeconds: 5 +# timeoutSeconds: 5 +# periodSeconds: 5 +# failureThreshold: 3 +# livenessProbe: +# httpGet: +# scheme: HTTPS +# path: /livez +# port: 6443 +# initialDelaySeconds: 10 +# timeoutSeconds: 5 +# periodSeconds: 30 +# failureThreshold: 5 + volumeMounts: + - mountPath: /etc/certs/upbound-agent + name: certs + readOnly: true + - mountPath: /etc/tokens/control-plane + name: upbound-control-plane-token + readOnly: true + volumes: + - name: certs + secret: + defaultMode: 420 + secretName: upbound-agent-tls + - name: upbound-control-plane-token + secret: + defaultMode: 420 + secretName: {{ .Values.upbound.controlPlane.tokenSecretName }} + optional: true + items: + - key: token + path: token +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/role.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/role.yaml new file mode 100644 index 000000000..960bc4d48 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/role.yaml @@ -0,0 +1,66 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +--- +# We need to be able to read universal-crossplane-config configmap in the namespace +# where UXP is deployed to provide version/configuration information. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "agent-name" . }}-uxp-config + labels: + {{- include "labelsAgent" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["universal-crossplane-config"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "agent-name" . }}-uxp-config + labels: + {{- include "labelsAgent" . | nindent 4 }} +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: upbound:view +{{- if eq .Values.upbound.controlPlane.permission "edit" }} + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: upbound:edit +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "agent-name" . }}-uxp-config +{{- end }} +{{- if eq .Values.upbound.controlPlane.permission "edit" }} +--- +# We need to be able to manage Secrets in the namespace where UXP is deployed +# so that Secrets pointed by ProviderConfig objects can be created by the agent. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "agent-name" . }}-secret + labels: + {{- include "labelsAgent" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "agent-name" . }}-secret + labels: + {{- include "labelsAgent" . | nindent 4 }} +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: upbound:edit +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "agent-name" . 
}}-secret +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/service.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/service.yaml new file mode 100644 index 000000000..7e22879a3 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/service.yaml @@ -0,0 +1,16 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "agent-name" . }} + labels: + {{- include "labelsAgent" . | nindent 4 }} +spec: + selector: + {{- include "selectorLabelsAgent" . | nindent 4 }} + ports: + - port: 6443 + targetPort: 6443 + protocol: TCP + name: https +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/serviceaccount.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/serviceaccount.yaml new file mode 100644 index 000000000..fe136d5c0 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "agent-name" . }} + labels: + {{- include "labelsAgent" . | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/tls-secret.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/tls-secret.yaml new file mode 100644 index 000000000..19a5c9748 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/upbound-agent/tls-secret.yaml @@ -0,0 +1,9 @@ +{{- if or (eq .Values.upbound.controlPlane.permission "view") (eq .Values.upbound.controlPlane.permission "edit") }} +apiVersion: v1 +kind: Secret +metadata: + name: upbound-agent-tls + labels: + {{- include "labelsAgent" . | nindent 4 }} +type: Opaque +{{- end }} \ No newline at end of file diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/_helpers.tpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/_helpers.tpl new file mode 100644 index 000000000..bd1141516 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/_helpers.tpl @@ -0,0 +1,22 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "xgql-name" -}} +{{- "xgql" -}} +{{- end -}} + +{{/* +Labels - xgql +*/}} +{{- define "labelsXgql" -}} +{{ include "labels" . }} +app.kubernetes.io/component: xgql +{{- end }} + +{{/* +Selector labels - xgql +*/}} +{{- define "selectorLabelsXgql" -}} +{{ include "selectorLabels" . }} +app.kubernetes.io/component: xgql +{{- end }} + diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/deployment.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/deployment.yaml new file mode 100644 index 000000000..520345385 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "xgql-name" . }} + labels: + {{- include "labelsXgql" . 
| nindent 4 }} +spec: + selector: + matchLabels: + {{- include "selectorLabelsXgql" . | nindent 6 }} + template: + metadata: + labels: + {{- include "selectorLabelsXgql" . | nindent 8 }} + spec: + serviceAccountName: {{ template "xgql-name" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range $index, $secret := .Values.imagePullSecrets }} + - name: {{ $secret }} + {{- end }} + {{ end }} + containers: + - name: xgql + image: "{{ .Values.xgql.image.repository }}:{{ .Values.xgql.image.tag }}" + imagePullPolicy: {{ .Values.xgql.image.pullPolicy }} + resources: + {{- toYaml .Values.xgql.resources | nindent 12 }} + ports: + - name: https + containerPort: 8443 + protocol: TCP + {{- if .Values.xgql.metrics.enabled }} + - name: metrics + containerPort: 8080 + {{- end }} + args: + - --tls-key=/etc/certs/xgql/tls.key + - --tls-cert=/etc/certs/xgql/tls.crt + {{- if .Values.xgql.config.debugMode }} + - "--debug" + {{- end }} + {{- range $arg := .Values.xgql.config.args }} + - {{ $arg }} + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - mountPath: /etc/certs/xgql + name: certs + readOnly: true + volumes: + - name: certs + secret: + defaultMode: 420 + secretName: xgql-tls diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/service.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/service.yaml new file mode 100644 index 000000000..80f822d3c --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "xgql-name" . }} + labels: + {{- include "labelsXgql" . | nindent 4 }} +spec: + selector: + {{- include "selectorLabelsXgql" . | nindent 4 }} + ports: + - port: 443 + targetPort: https + protocol: TCP + name: https diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/serviceaccount.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/serviceaccount.yaml new file mode 100644 index 000000000..88e8bbdb7 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "xgql-name" . }} + labels: + {{- include "labelsXgql" . | nindent 4 }} diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/tls-secret.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/tls-secret.yaml new file mode 100644 index 000000000..4b06ca735 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/templates/xgql/tls-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: xgql-tls + labels: + {{- include "labelsXgql" . 
| nindent 4 }} +type: Opaque diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml b/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml new file mode 100644 index 000000000..f5ed73d78 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml @@ -0,0 +1,152 @@ +nameOverride: "crossplane" + +replicas: 1 + +deploymentStrategy: RollingUpdate + +image: + repository: crossplane/crossplane + tag: v1.2.2 + pullPolicy: IfNotPresent + +nodeSelector: {} +tolerations: {} +affinity: {} + +leaderElection: true +args: {} + +provider: + packages: [] + +configuration: + packages: [] + +imagePullSecrets: + - dockerhub + +rbacManager: + deploy: true + skipAggregatedClusterRoles: true + replicas: 1 + managementPolicy: Basic + leaderElection: true + args: {} + nodeSelector: {} + tolerations: {} + affinity: {} + +priorityClassName: "" + +resourcesCrossplane: + limits: + cpu: 100m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + +securityContextCrossplane: + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +packageCache: + medium: "" + sizeLimit: 5Mi + pvc: "" + +resourcesRBACManager: + limits: + cpu: 100m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + +securityContextRBACManager: + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +alpha: + oam: + enabled: false + +metrics: + enabled: false + +# List of extra environment variables to set in the crossplane deployment. +# EXAMPLE +# extraEnvironmentVars: +# sample.key=value1 +# ANOTHER.KEY=value2 +# RESULT +# - name: sample_key +# value: "value1" +# - name: ANOTHER_KEY +# value: "value2" +extraEnvVarsCrossplane: {} + +# List of extra environment variables to set in the crossplane rbac manager deployment. 
+# EXAMPLE +# extraEnvironmentVars: +# sample.key=value1 +# ANOTHER.KEY=value2 +# RESULT +# - name: sample_key +# value: "value1" +# - name: ANOTHER_KEY +# value: "value2" +extraEnvVarsRBACManager: {} + +### Agent Values + +upbound: + apiURL: "https://api.upbound.io" + connectHost: "connect.upbound.io" + controlPlane: + permission: edit + tokenSecretName: upbound-control-plane-token + token: "" + +xgql: + image: + repository: upbound/xgql + tag: v0.1.3 + pullPolicy: IfNotPresent + resources: {} + metrics: + enabled: false + config: + debugMode: false + args: [] + +agent: + image: + repository: upbound/upbound-agent + tag: v1.2.2-up.1 + pullPolicy: IfNotPresent + resources: {} + config: + debugMode: false + args: [] + +### Bootstrapper Values + +bootstrapper: + image: + repository: upbound/uxp-bootstrapper + tag: v1.2.2-up.1 + pullPolicy: IfNotPresent + resources: {} + config: + debugMode: false + args: [] + +billing: + awsMarketplace: + enabled: false + iamRoleARN: arn:aws:iam:::role/ diff --git a/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml.tmpl b/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml.tmpl new file mode 100644 index 000000000..a9a0a3389 --- /dev/null +++ b/charts/universal-crossplane/universal-crossplane/1.2.200100/values.yaml.tmpl @@ -0,0 +1,152 @@ +nameOverride: "crossplane" + +replicas: 1 + +deploymentStrategy: RollingUpdate + +image: + repository: crossplane/crossplane + tag: %%CROSSPLANE_TAG%% + pullPolicy: IfNotPresent + +nodeSelector: {} +tolerations: {} +affinity: {} + +leaderElection: true +args: {} + +provider: + packages: [] + +configuration: + packages: [] + +imagePullSecrets: + - dockerhub + +rbacManager: + deploy: true + skipAggregatedClusterRoles: true + replicas: 1 + managementPolicy: Basic + leaderElection: true + args: {} + nodeSelector: {} + tolerations: {} + affinity: {} + +priorityClassName: "" + +resourcesCrossplane: + limits: + cpu: 100m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + +securityContextCrossplane: + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +packageCache: + medium: "" + sizeLimit: 5Mi + pvc: "" + +resourcesRBACManager: + limits: + cpu: 100m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + +securityContextRBACManager: + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +alpha: + oam: + enabled: false + +metrics: + enabled: false + +# List of extra environment variables to set in the crossplane deployment. +# EXAMPLE +# extraEnvironmentVars: +# sample.key=value1 +# ANOTHER.KEY=value2 +# RESULT +# - name: sample_key +# value: "value1" +# - name: ANOTHER_KEY +# value: "value2" +extraEnvVarsCrossplane: {} + +# List of extra environment variables to set in the crossplane rbac manager deployment. 
+# EXAMPLE +# extraEnvironmentVars: +# sample.key=value1 +# ANOTHER.KEY=value2 +# RESULT +# - name: sample_key +# value: "value1" +# - name: ANOTHER_KEY +# value: "value2" +extraEnvVarsRBACManager: {} + +### Agent Values + +upbound: + apiURL: "https://api.upbound.io" + connectHost: "connect.upbound.io" + controlPlane: + permission: edit + tokenSecretName: upbound-control-plane-token + token: "" + +xgql: + image: + repository: upbound/xgql + tag: %%XGQL_TAG%% + pullPolicy: IfNotPresent + resources: {} + metrics: + enabled: false + config: + debugMode: false + args: [] + +agent: + image: + repository: upbound/upbound-agent + tag: %%AGENT_TAG%% + pullPolicy: IfNotPresent + resources: {} + config: + debugMode: false + args: [] + +### Bootstrapper Values + +bootstrapper: + image: + repository: upbound/uxp-bootstrapper + tag: %%BOOTSTRAPPER_TAG%% + pullPolicy: IfNotPresent + resources: {} + config: + debugMode: false + args: [] + +billing: + awsMarketplace: + enabled: false + iamRoleARN: arn:aws:iam:::role/ diff --git a/index.yaml b/index.yaml new file mode 100644 index 000000000..1b76f53e5 --- /dev/null +++ b/index.yaml @@ -0,0 +1,917 @@ +apiVersion: v1 +entries: + ambassador: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Ambassador Edge Stack + catalog.cattle.io/release-name: ambassador + apiVersion: v1 + appVersion: 1.13.8 + created: "2021-06-23T17:44:55.380609-07:00" + description: A Helm chart for Datawire Ambassador + digest: f56e602f017a6e48d2838033b31ce356a47db561fcd9c02e008d06b67be95b90 + home: https://www.getambassador.io/ + icon: https://www.getambassador.io/images/logo.png + keywords: + - api gateway + - ambassador + - datawire + - envoy + maintainers: + - email: markus@maga.se + name: flydiverny + - email: flynn@datawire.io + name: kflynn + - email: nkrause@datawire.io + name: nbkrause + - email: lukeshu@datawire.io + name: lukeshu + name: ambassador + sources: + - https://github.com/datawire/ambassador + - https://github.com/prometheus/statsd_exporter + urls: + - assets/ambassador/ambassador-6.7.1100.tgz + version: 6.7.1100 + artifactory-ha: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-ha + apiVersion: v1 + appVersion: 7.17.5 + created: "2021-06-23T17:44:55.406602-07:00" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.3.4 + description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. + digest: 63b4083aaf16e3f8f46c01943a6113b11beebdab0b3bd9e6b482ad3e8cc4e56a + home: https://www.jfrog.com/artifactory/ + icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png + keywords: + - artifactory + - jfrog + - devops + maintainers: + - email: installers@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-ha + sources: + - https://github.com/jfrog/charts + urls: + - assets/artifactory-ha/artifactory-ha-4.13.000.tgz + version: 4.13.000 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-ha + apiVersion: v1 + appVersion: 7.12.6 + created: "2021-06-23T17:44:55.417983-07:00" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.3.4 + description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. 
+ digest: 6f13240e67c292e0a7229b1e0b1d8389991e10850d629fab7bac34b7f702fa3c + home: https://www.jfrog.com/artifactory/ + icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png + keywords: + - artifactory + - jfrog + - devops + maintainers: + - email: installers@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-ha + sources: + - https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view + - https://github.com/jfrog/charts + urls: + - assets/artifactory-ha/artifactory-ha-4.7.600.tgz + version: 4.7.600 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-ha + apiVersion: v1 + appVersion: 7.6.3 + created: "2021-06-23T17:44:55.395076-07:00" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 8.7.3 + description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. + digest: cfe8c5e0fbf007f8f858b65ab788ad297cdece703364d94ff9d36beca395ca6a + home: https://www.jfrog.com/artifactory/ + icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-ha/logo/artifactory-logo.png + keywords: + - artifactory + - jfrog + - devops + maintainers: + - email: amithk@jfrog.com + name: amithins + - email: daniele@jfrog.com + name: danielezer + - email: eldada@jfrog.com + name: eldada + - email: ramc@jfrog.com + name: chukka + - email: rimasm@jfrog.com + name: rimusz + name: artifactory-ha + sources: + - https://bintray.com/jfrog/product/JFrog-Artifactory-Pro/view + - https://github.com/jfrog/charts + urls: + - assets/artifactory-ha/artifactory-ha-3.0.1400.tgz + version: 3.0.1400 + artifactory-jcr: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-jcr + apiVersion: v1 + appVersion: 7.12.5 + created: "2021-06-23T17:44:55.44209-07:00" + dependencies: + - name: artifactory + repository: https://charts.jfrog.io/ + version: 11.7.4 + description: JFrog Container Registry + digest: 148af8042991b7d031770887a8d64e034268c2e1e3eb03f55e13310a40cb2a60 + home: https://jfrog.com/container-registry/ + icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-jcr/logo/jcr-logo.png + keywords: + - artifactory + - jfrog + - container + - registry + - devops + - jfrog-container-registry + maintainers: + - email: helm@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-jcr + sources: + - https://github.com/jfrog/charts + urls: + - assets/artifactory-jcr/artifactory-jcr-3.4.000.tgz + version: 3.4.000 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: artifactory-jcr + apiVersion: v1 + appVersion: 7.6.3 + created: "2021-06-23T17:44:55.431809-07:00" + dependencies: + - name: artifactory + repository: https://charts.jfrog.io/ + version: 10.0.12 + description: JFrog Container Registry + digest: 4f32c8460467e79492bfab5da99afbd5867f6e8dc305d96458790b6de083f4da + home: https://jfrog.com/container-registry/ + icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-jcr/logo/jcr-logo.png + keywords: + - artifactory + - jfrog + - container + - registry + - devops + - jfrog-container-registry + maintainers: + - email: amithk@jfrog.com + name: amithins + - email: daniele@jfrog.com + name: danielezer + - email: eldada@jfrog.com + name: eldada + - email: ramc@jfrog.com + name: chukka + - email: rimasm@jfrog.com + name: rimusz + - email: 
vinaya@jfrog.com + name: vinaya + name: artifactory-jcr + sources: + - https://github.com/jfrog/charts + urls: + - assets/artifactory-jcr/artifactory-jcr-2.5.100.tgz + version: 2.5.100 + citrix-adc-istio-ingress-gateway: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: citrix-adc-istio-ingress-gateway + apiVersion: v1 + appVersion: 1.2.1 + created: "2021-06-23T17:44:55.442927-07:00" + description: A Helm chart for Citrix ADC as Ingress Gateway installation in Istio + Service Mesh on Kubernetes platform + digest: 41121dad6ac7271f2ada14e5f8cbc7d398e1e656db95e1937ab1dc5bab563e4c + home: https://www.citrix.com + icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png + maintainers: + - email: dhiraj.gedam@citrix.com + name: dheerajng + - email: subash.dangol@citrix.com + name: subashd + name: citrix-adc-istio-ingress-gateway + sources: + - https://github.com/citrix/citrix-istio-adaptor + urls: + - assets/citrix-adc-istio-ingress-gateway/citrix-adc-istio-ingress-gateway-1.2.100.tgz + version: 1.2.100 + citrix-cpx-with-ingress-controller: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: citrix-cpx-with-ingress-controller + apiVersion: v1 + appVersion: 1.8.28 + created: "2021-06-23T17:44:55.443962-07:00" + description: A Helm chart for Citrix ADC CPX with Citrix ingress Controller running + as sidecar. + digest: 298c1472ff1afea8333346f2d67dc4bb6fb64779b4b90378b18e57180995286e + home: https://www.citrix.com + icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png + maintainers: + - email: priyanka.sharma@citrix.com + name: priyankash-citrix + - email: subash.dangol@citrix.com + name: subashd + name: citrix-cpx-with-ingress-controller + sources: + - https://github.com/citrix/citrix-k8s-ingress-controller + urls: + - assets/citrix-cpx-with-ingress-controller/citrix-cpx-with-ingress-controller-1.8.2800.tgz + version: 1.8.2800 + citrix-k8s-cpx-ingress-controller: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/namespace: citrix-k8s-cpx-ingress-controller + catalog.cattle.io/release-name: citrix-k8s-cpx-ingress-controller + apiVersion: v1 + appVersion: 1.8.28 + created: "2020-09-10T18:19:56.040802801Z" + description: A Helm chart for Citrix ADC CPX with Citrix ingress Controller running + as sidecar. 
+ digest: 0a54474018a40043d75aad6209bdc585a3ba2cd9d1fa6c2131536091ec99bfd0 + home: https://www.citrix.com + icon: https://raw.githubusercontent.com/citrix/citrix-helm-charts/gh-pages/icon.png + maintainers: + - email: priyanka.sharma@citrix.com + name: priyankash-citrix + - email: subash.dangol@citrix.com + name: subashd + name: citrix-k8s-cpx-ingress-controller + sources: + - https://github.com/citrix/citrix-k8s-ingress-controller + urls: + - assets/citrix-k8s-cpx-ingress-controller/citrix-k8s-cpx-ingress-controller-1.8.2800.tgz + version: 1.8.2800 + cloudcasa: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Cloudcasa + catalog.cattle.io/namespace: cloudcasa-io + catalog.cattle.io/release-name: cloudcasa + apiVersion: v2 + appVersion: "1.0" + created: "2021-06-23T17:44:55.447795-07:00" + description: CloudCasa backup service for Kubernetes and cloud native applications + digest: 9bb36abfa6db450688840c60a4181791da4f4d637f5a48e7aee93238f4d471c1 + home: https://cloudcasa.io + icon: https://partner-charts.rancher.io/assets/logos/cloudcasa.png + keywords: + - backup + - Catalogic + - CloudCasa + kubeVersion: '>=1.13.0-0' + maintainers: + - email: info@catalogicsoftware.com + name: catalogicsoftware + name: cloudcasa + urls: + - assets/cloudcasa/cloudcasa-1.tgz + version: "1" + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/namespace: cloudcasa-io + catalog.cattle.io/release-name: cloudcasa + apiVersion: v2 + appVersion: 0.1.0 + created: "2021-06-23T17:44:55.446342-07:00" + description: CloudCasa backup service for Kubernetes and cloud native applications + digest: be87ab1b0e0e9c74998d5d3e5041f75ac732174389ee3bf68a3d8016aace786f + home: https://cloudcasa.io + icon: https://partner-charts.rancher.io/assets/logos/cloudcasa.png + keywords: + - backup + - Catalogic + - CloudCasa + kubeVersion: '>=1.13.0-0' + maintainers: + - email: info@catalogicsoftware.com + name: catalogicsoftware + name: cloudcasa + urls: + - assets/cloudcasa/cloudcasa-0.1.000.tgz + version: 0.1.000 + cockroachdb: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: cockroachdb + apiVersion: v1 + appVersion: 20.1.3 + created: "2021-06-23T17:44:55.449351-07:00" + description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. 
+ digest: ba272eab2f61dd699854035f1bfdfafb15cd0b99eefc2b8486702dc990202bea + home: https://www.cockroachlabs.com + icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png + maintainers: + - email: helm-charts@cockroachlabs.com + name: cockroachlabs + name: cockroachdb + sources: + - https://github.com/cockroachdb/cockroach + urls: + - assets/cockroachdb/cockroachdb-4.1.200.tgz + version: 4.1.200 + control-agent: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: streamsets + apiVersion: v1 + appVersion: 3.8.0 + created: "2021-06-23T17:44:55.488571-07:00" + description: Control Agent for managing StreamSets Control Hub Deployments + digest: 5289b93c60200cc9896b2e903c4143a8db1d312409ab86da5dc77df693bc395f + home: https://streamsets.com + icon: https://github.com/streamsets/datacollector/raw/master/basic-lib/src/main/resources/sdcipc.png + keywords: + - streamsets + - sdc + - sch + maintainers: + - email: thomas.ganka@streamsets.com + name: thomasganka + name: control-agent + sources: + - https://github.com/streamsets/helm-charts/tree/master/incubating/control-agent + urls: + - assets/streamsets/control-agent-2.0.100.tgz + version: 2.0.100 + cost-analyzer: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: kubecost + apiVersion: v1 + appVersion: 1.70.0 + created: "2021-06-23T17:44:55.485546-07:00" + description: A Helm chart that sets up Kubecost, Prometheus, and Grafana to monitor + cloud costs. + digest: b633966fdce3fa9d0c899ff38b6090ac687ac1070000c2742e6834b7430c9975 + icon: https://kubecost.com/images/logo-white.png + name: cost-analyzer + urls: + - assets/kubecost/cost-analyzer-1.70.000.tgz + version: 1.70.000 + csi-wekafsplugin: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: csi-wekafsplugin + apiVersion: v2 + appVersion: 0.6.4 + created: "2021-06-23T17:44:55.449955-07:00" + description: Helm chart for Deployment of WekaIO Container Storage Interface (CSI) + plugin for WekaFS - the world fastest filesystem + digest: 78feaf34b9a8d8cb5ddcf3928e5782ecb6da0d4de94bdfc9b65162be3e357a7d + home: https://github.com/weka/csi-wekafs + icon: https://weka.github.io/csi-wekafs/logo.png + name: csi-wekafsplugin + sources: + - https://github.com/weka/csi-wekafs/tree/v0.6.4/deploy/helm/csi-wekafsplugin + type: application + urls: + - assets/csi-wekafs/csi-wekafsplugin-0.6.400.tgz + version: 0.6.400 + datadog: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: datadog + apiVersion: v1 + appVersion: "7" + created: "2021-06-23T17:44:55.454771-07:00" + dependencies: + - condition: datadog.kubeStateMetricsEnabled + name: kube-state-metrics + repository: https://charts.helm.sh/stable + version: =2.8.11 + description: Datadog Agent + digest: 5e05f58feb6bd16390bd3ed6f668d830bf134efee0dbec4a441f16f16e3a4122 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-2.4.200.tgz + version: 2.4.200 + dynatrace-oneagent-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: dynatrace-oneagent-operator + apiVersion: v2 + appVersion: 0.8.0 + created: 
"2021-06-23T17:44:55.4562-07:00" + description: The Dynatrace OneAgent Operator Helm chart for Kubernetes and Openshift + digest: 7daf37239c0ca6f903d0e92bcb0ffff02584872f45e2e83822bb986dbf61ee58 + home: https://www.dynatrace.com/ + icon: https://assets.dynatrace.com/global/resources/Signet_Logo_RGB_CP_512x512px.png + maintainers: + - email: marco.mader@dynatrace.com + name: DTMad + - email: luis.garcia@dynatrace.com + name: lrgar + - email: michael.mayr@dynatrace.com + name: mmayr-at + name: dynatrace-oneagent-operator + sources: + - https://github.com/Dynatrace/helm-charts + type: application + urls: + - assets/dynatrace-oneagent-operator/dynatrace-oneagent-operator-0.8.000.tgz + version: 0.8.000 + falcon-sensor: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: CrowdStrike Falcon Platform + catalog.cattle.io/release-name: falcon-helm + apiVersion: v2 + appVersion: 0.9.3 + created: "2021-06-23T17:44:55.456824-07:00" + description: A Helm chart to deploy CrowdStrike Falcon sensors into Kubernetes + clusters. + digest: cb98b5a7e6020ed2d06db01575e76b4cfd3e94549805323e65f551a832a1254a + home: https://crowdstrike.com + icon: https://raw.githubusercontent.com/CrowdStrike/falcon-helm/main/images/crowdstrike-logo.svg + keywords: + - CrowdStrike + - Falcon + - EDR + - kubernetes + - security + - monitoring + - alerting + maintainers: + - name: CrowdStrike Solution Architecture + - email: gabriel.alford@crowdstrike.com + name: Gabe Alford + name: falcon-sensor + sources: + - https://github.com/CrowdStrike/falcon-helm + type: application + urls: + - assets/falcon-sensor/falcon-sensor-0.9.300.tgz + version: 0.9.300 + federatorai: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Federator.ai + catalog.cattle.io/release-name: federatorai + apiVersion: v1 + appVersion: 4.5.1-ga + created: "2021-06-23T17:44:55.458574-07:00" + description: Federator.ai helps enterprises optimize cloud resources, maximize + application performance, and save significant cost without excessive over-provisioning + or under-provisioning of resources, meeting the service-level requirements of + their applications. + digest: bb91267948c7571fcc0ff6604bf950a17da2b4704d31c6a0033ce444d2c0399b + home: https://www.prophetstor.com + icon: https://raw.githubusercontent.com/prophetstor-ai/public/master/images/logo.png + keywords: + - AI + - Resource Orchestration + - NoOps + - AIOps + - Intelligent Workload Management + - Cost Optimization + maintainers: + - email: support@prophetstor.com + name: ProphetStor Data Services, Inc. 
+ name: federatorai + sources: + - https://www.prophetstor.com + urls: + - assets/federatorai/federatorai-4.5.100.tgz + version: 4.5.100 + haproxy: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy + apiVersion: v1 + appVersion: 1.5.4 + created: "2021-06-23T17:44:55.461488-07:00" + description: A Helm chart for HAProxy Kubernetes Ingress Controller + digest: fd110caa557e3b385d407578a4e7693429d5bc722d233f51f19ca58840372ca7 + home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png + keywords: + - ingress + - haproxy + kubeVersion: '>=1.12.0-0' + maintainers: + - email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi + - email: bassmann@haproxy.com + name: Baptiste Assmann + - email: dkorunic@haproxy.com + name: Dinko Korunic + name: haproxy + sources: + - https://github.com/haproxytech/kubernetes-ingress + urls: + - assets/haproxy/haproxy-1.12.500.tgz + version: 1.12.500 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy + apiVersion: v1 + appVersion: 1.5.1 + created: "2021-06-23T17:44:55.46002-07:00" + description: A Helm chart for HAProxy Kubernetes Ingress Controller + digest: 29aa101f4851cac5b94d2de40c961d0f24c90bb361c0bf1bc17d3244ddf92046 + home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png + keywords: + - ingress + - haproxy + kubeVersion: '>=1.12.0-0' + maintainers: + - email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi + - email: bassmann@haproxy.com + name: Baptiste Assmann + - email: dkorunic@haproxy.com + name: Dinko Korunic + name: haproxy + sources: + - https://github.com/haproxytech/kubernetes-ingress + urls: + - assets/haproxy/haproxy-1.12.100.tgz + version: 1.12.100 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: haproxy + apiVersion: v1 + appVersion: 1.4.6 + created: "2021-06-23T17:44:55.462834-07:00" + description: A Helm chart for HAProxy Kubernetes Ingress Controller + digest: f4b11d983e29c3748e04fba10d626277cc4c35c977a2bda016925a326af38b54 + home: https://github.com/haproxytech/helm-charts/tree/master/kubernetes-ingress + icon: http://www.haproxy.org/img/HAProxyCommunityEdition_60px.png + keywords: + - ingress + - haproxy + kubeVersion: '>=1.12.0-0' + maintainers: + - email: mmhedhbi@haproxy.com + name: Moemen Mhedhbi + - email: bassmann@haproxy.com + name: Baptiste Assmann + - email: dkorunic@haproxy.com + name: Dinko Korunic + name: haproxy + sources: + - https://github.com/haproxytech/kubernetes-ingress + urls: + - assets/haproxy/haproxy-1.4.300.tgz + version: 1.4.300 + hpe-csi-driver: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-csi-driver + apiVersion: v1 + appVersion: 1.4.0 + created: "2021-06-23T17:44:55.464439-07:00" + description: A Helm chart for installing the HPE CSI Driver for Kubernetes + digest: 487dca3d6bdf6961bf29425945b40974667a67723d2a8d9edbca87285e628793 + home: https://hpe.com/storage/containers + icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png + keywords: + - HPE + - Storage + - StorageClass + maintainers: + - email: hpe-containers-dev@hpe.com + name: raunakkumar + name: hpe-csi-driver + sources: + - https://scod.hpedev.io/csi_driver + urls: + - assets/hpe-csi-driver/hpe-csi-driver-1.4.200.tgz + version: 1.4.200 + - annotations: + 
catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-csi-driver + apiVersion: v1 + appVersion: 1.3.0 + created: "2021-06-23T17:44:55.463517-07:00" + description: A Helm chart for installing the HPE CSI Driver for Kubernetes + digest: f5e5ce5e51d1b76ea667aca7e7689ccf9439825a30485fa2372ca0b9b86c7af0 + home: https://hpe.com/storage/containers + icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png + keywords: + - HPE + - Storage + - StorageClass + - CentOS + - Ubuntu + - RHEL + maintainers: + - email: hpe-containers-dev@hpe.com + name: shivamerla + name: hpe-csi-driver + sources: + - https://scod.hpedev.io/csi_driver + urls: + - assets/hpe-csi-driver/hpe-csi-driver-1.3.000.tgz + version: 1.3.000 + hpe-flexvolume-driver: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: hpe-flexvolume-driver + apiVersion: v1 + appVersion: "3.1" + created: "2021-06-23T17:44:55.4653-07:00" + description: A Helm chart for installing the HPE Volume Driver for Kubernetes + FlexVolume plugin + digest: 50fc38e25308bf32156bed37bd549a5855309ac69945ff402fbe5ea809f88ddc + home: https://hpe.com/storage/containers + icon: https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png + keywords: + - HPE + - Storage + - StorageClass + - CentOS + - Ubuntu + - CloudVolumes + maintainers: + - email: hpe-containers-dev@hpe.com + name: shivamerla + name: hpe-flexvolume-driver + sources: + - https://github.com/hpe-storage/flexvolume-driver + urls: + - assets/hpe-flexvolume-driver/hpe-flexvolume-driver-3.1.000.tgz + version: 3.1.000 + instana-agent: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: instana-agent + apiVersion: v1 + appVersion: "1.1" + created: "2021-06-23T17:44:55.466349-07:00" + description: Instana Agent for Kubernetes + digest: 164723f111d03fe67c775d916b0bdf29691b29005b8d93da7caa210cf43cab9c + home: https://www.instana.com/ + icon: https://instana-management-assets.s3-eu-west-1.amazonaws.com/stan-logo-2020.png + maintainers: + - email: jon.brisbin@instana.com + name: jbrisbin + - email: william.james@instana.com + name: wiggzz + - email: jeroen.soeters@instana.com + name: JeroenSoeters + - email: fabian.staeber@instana.com + name: fstab + - email: miel.donkers@instana.com + name: mdonkers + - email: dahlia.bock@instana.com + name: dlbock + - email: nathan.fisher@instana.com + name: nfisher + name: instana-agent + sources: + - https://github.com/instana/instana-agent-docker + urls: + - assets/instana-agent/instana-agent-1.0.2900.tgz + version: 1.0.2900 + k8s-triliovault-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: TrilioVault for Kubernetes Operator + catalog.cattle.io/release-name: k8s-triliovault-operator + apiVersion: v1 + appVersion: v2.0.5 + created: "2021-06-23T17:44:55.467976-07:00" + description: K8s-TrilioVault-Operator is an operator designed to manage the K8s-TrilioVault + Application Lifecycle. 
+ digest: e3272d943f70ec0c442c94920e4093fd0db9d1833711bcb8d23181f10098c000 + home: https://github.com/trilioData/k8s-triliovault-operator + icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png + maintainers: + - email: prafull.ladha@trilio.io + name: prafull11 + name: k8s-triliovault-operator + sources: + - https://github.com/trilioData/k8s-triliovault-operator + urls: + - assets/k8s-triliovault-operator/k8s-triliovault-operator-2.0.500.tgz + version: 2.0.500 + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: k8s-triliovault-operator + apiVersion: v1 + appVersion: v2.0.2 + created: "2021-06-23T17:44:55.469031-07:00" + description: K8s-TrilioVault-Operator is an operator designed to manage the K8s-TrilioVault + Application Lifecycle. + digest: 24d6699876b92315e0b3ce5bd4f171f315ad2f963316e4d8d4e4f6993b3e9021 + home: https://github.com/trilioData/k8s-triliovault-operator + icon: https://www.trilio.io/wp-content/uploads/2021/01/Trilio-2020-logo-RGB-gray-green.png + maintainers: + - email: prafull.ladha@trilio.io + name: prafull11 + name: k8s-triliovault-operator + sources: + - https://github.com/trilioData/k8s-triliovault-operator + urls: + - assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz + version: v2.0.200 + nutanix-csi-storage: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: nutanix-csi-storage + apiVersion: v1 + appVersion: 2.3.1 + created: "2021-06-23T17:44:55.486353-07:00" + description: A Helm chart for installing Nutanix CSI Volume Driver + digest: 319009a424d1748dc5e7e32e3c0a424621f9555b3ccb0a583f204c561078ef29 + home: https://github.com/nutanix/helm + icon: https://avatars2.githubusercontent.com/u/6165865?s=200&v=4 + keywords: + - Nutanix + - Storage + - Volumes + - Files + - StorageClass + - CentOS + - Ubuntu + kubeVersion: '>= 1.13.0' + maintainers: + - name: tuxtof + name: nutanix-csi-storage + urls: + - assets/nutanix-csi-storage/nutanix-csi-storage-2.3.100.tgz + version: 2.3.100 + openebs: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: openebs + apiVersion: v1 + appVersion: 1.12.0 + created: "2021-06-23T17:44:55.4876-07:00" + description: Containerized Storage for Containers + digest: fa46a4405ad4ad523d246d175bb48fc237c556bf606bb3d0af724920dc166bf6 + home: http://www.openebs.io/ + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png + keywords: + - cloud-native-storage + - block-storage + - iSCSI + - storage + maintainers: + - email: kiran.mova@openebs.io + name: kmova + - email: prateek.pandey@openebs.io + name: prateekpandey14 + name: openebs + sources: + - https://github.com/openebs/openebs + urls: + - assets/openebs/openebs-1.12.300.tgz + version: 1.12.300 + portshift-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: portshift-operator + apiVersion: v1 + appVersion: v0.1.3 + created: "2021-06-23T17:44:55.488041-07:00" + description: | + Portshift cloud-native security platform is an agentless security solution for containerized applications + digest: e332d44b698d4327c96780453f7de16e32e7b905e9f8797b05c02ba268536ed0 + home: https://www.portshift.io/ + icon: https://www.portshift.io/wp-content/uploads/2019/10/portshift-logo-68.png + keywords: + - portshift + - operator + - monitoring + - security + - alerting + - metric + - troubleshooting + - run-time + maintainers: + - email: 
idan@portshift.io + name: idan + name: portshift-operator + urls: + - assets/portshift-operator/portshift-operator-0.1.000.tgz + version: 0.1.000 + sysdig: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/release-name: sysdig + apiVersion: v1 + appVersion: 10.3.0 + created: "2021-06-23T17:44:55.489923-07:00" + description: Sysdig Monitor and Secure agent + digest: 37cef38a742229947b02dfc764da69ec382b260e062372f2fb7cc3056a31790f + home: https://www.sysdig.com/ + icon: https://478h5m1yrfsa3bbe262u7muv-wpengine.netdna-ssl.com/wp-content/uploads/2019/02/Shovel_600px.png + keywords: + - monitoring + - security + - alerting + - metric + - troubleshooting + - run-time + maintainers: + - email: lachlan@deis.com + name: lachie83 + - email: jorge.salamero@sysdig.com + name: bencer + - email: nestor.salceda@sysdig.com + name: nestorsalceda + - email: alvaro.iradier@sysdig.com + name: airadier + - email: carlos.arilla@sysdig.com + name: carillan81 + name: sysdig + sources: + - https://app.sysdigcloud.com/#/settings/user + - https://github.com/draios/sysdig + urls: + - assets/sysdig/sysdig-1.9.200.tgz + version: 1.9.200 + universal-crossplane: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Upbound Universal Crossplane + catalog.cattle.io/release-name: universal-crossplane + apiVersion: v1 + appVersion: 1.2.2001 + created: "2021-06-23T17:44:55.491386-07:00" + description: 'Upbound Universal Crossplane (UXP) is Upbound''s official enterprise-grade + distribution of Crossplane. It''s fully compatible with upstream Crossplane, + open source, capable of connecting to Upbound Cloud for real-time dashboard + visibility, and maintained by Upbound. It''s the easiest way for both individual + community members and enterprises to build their production control planes. ' + digest: 3c1dfa0f7f6181ab4101f23c41aadddb330484a1d6f48efcbbd523c6cf92eec9 + home: https://upbound.io + icon: https://raw.githubusercontent.com/upbound/universal-crossplane/66ce9eb2c5a0c3af8ed7d19551a2c4d743b933b9/docs/media/logo.png + keywords: + - cloud + - infrastructure + - services + - application + - database + - cache + - bucket + - infra + - app + - ops + - oam + - gcp + - azure + - aws + - alibaba + - cloudsql + - rds + - s3 + - azuredatabase + - asparadb + - gke + - aks + - eks + maintainers: + - email: info@upbound.io + name: Upbound Inc. + name: universal-crossplane + urls: + - assets/universal-crossplane/universal-crossplane-1.2.200100.tgz + version: 1.2.200100 +generated: "2021-06-23T17:44:55.374388-07:00"
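Note on consuming the generated index.yaml above: a Helm client resolves each entry by joining the repository base URL with the relative path in the entry's urls field and verifying the listed digest. Below is a minimal sketch of that resolution for the ambassador entry; the values are copied from the index above, while the base URL https://partner-charts.rancher.io is an assumption inferred from the logo links in this index, and the comments are illustrative rather than part of the generated file.

    # sketch: how a client resolves the ambassador 6.7.1100 entry in index.yaml
    entries:
      ambassador:
        - name: ambassador
          version: 6.7.1100          # selected with, e.g., `helm install ... --version 6.7.1100`
          urls:
            - assets/ambassador/ambassador-6.7.1100.tgz   # path relative to the repository base URL
          digest: f56e602f017a6e48d2838033b31ce356a47db561fcd9c02e008d06b67be95b90
    # assumed download URL after joining with the (assumed) base URL:
    #   https://partner-charts.rancher.io/assets/ambassador/ambassador-6.7.1100.tgz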
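Note on the universal-crossplane values.yaml added above: the extraEnvVarsCrossplane and extraEnvVarsRBACManager hooks default to empty maps, and the EXAMPLE/RESULT comment in the file indicates that dots in keys are rendered as underscores in the resulting environment variable names. The override below is a minimal sketch under those assumptions; the file name my-values.yaml, the token placeholder, and the resource numbers are illustrative, not defaults shipped by the chart.

    # my-values.yaml -- illustrative override for the universal-crossplane chart
    upbound:
      controlPlane:
        token: "<control-plane-token>"   # empty by default; presumably set when connecting to Upbound Cloud
    extraEnvVarsCrossplane:              # extra env vars on the crossplane deployment;
      sample.key: value1                 # per the EXAMPLE comment, sample.key is rendered as sample_key
    resourcesCrossplane:                 # raises the 100m / 512Mi limits shown in the chart defaults above
      limits:
        cpu: 200m
        memory: 1Gi

Such a file would typically be applied at install time (for example, helm install uxp <repo>/universal-crossplane -f my-values.yaml, where uxp and <repo> are placeholders for the release and repository names).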